print("\n===================================================================================================")
import argparse
import copy
import gc
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
import h5py
import os
import random
from tqdm import tqdm
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision.utils import save_image
import timeit
from PIL import Image
from opts import parse_opts
args = parse_opts()
wd = args.root_path
os.chdir(wd)
from utils import *
from models import *
from trainer import train_cgan, sample_cgan_given_labels
from eval_metrics import cal_FID, cal_labelscore, inception_score
#######################################################################################
''' Settings '''
#######################################################################################
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)
#-------------------------------
# output folders
path_to_output = os.path.join(wd, "output/output_arch_{}_dimz_{}_lrg_{}_lrd_{}".format(args.GAN_arch, args.dim_z, args.lr_g, args.lr_d))
os.makedirs(path_to_output, exist_ok=True)
save_models_folder = os.path.join(path_to_output, 'saved_models')
os.makedirs(save_models_folder, exist_ok=True)
save_images_folder = os.path.join(path_to_output, 'saved_images')
os.makedirs(save_images_folder, exist_ok=True)
#######################################################################################
''' Data loader '''
#######################################################################################
# data loader
data_filename = args.data_path + '/UTKFace_{}x{}.h5'.format(args.img_size, args.img_size)
hf = h5py.File(data_filename, 'r')
labels = hf['labels'][:]
labels = labels.astype(float)
images = hf['images'][:]
hf.close()
# subset of UTKFace
selected_labels = np.arange(args.min_label, args.max_label+1)
for i in range(len(selected_labels)):
curr_label = selected_labels[i]
index_curr_label = np.where(labels==curr_label)[0]
if i == 0:
images_subset = images[index_curr_label]
labels_subset = labels[index_curr_label]
else:
images_subset = np.concatenate((images_subset, images[index_curr_label]), axis=0)
labels_subset = np.concatenate((labels_subset, labels[index_curr_label]))
# for i
images = images_subset
labels = labels_subset
del images_subset, labels_subset; gc.collect()
raw_images = copy.deepcopy(images)
raw_labels = copy.deepcopy(labels)
### show some real images
if args.show_real_imgs:
unique_labels_show = sorted(list(set(labels)))
nrow = len(unique_labels_show); ncol = 10
images_show = np.zeros((nrow*ncol, images.shape[1], images.shape[2], images.shape[3]))
for i in range(nrow):
curr_label = unique_labels_show[i]
indx_curr_label = np.where(labels==curr_label)[0]
np.random.shuffle(indx_curr_label)
indx_curr_label = indx_curr_label[0:ncol]
for j in range(ncol):
images_show[i*ncol+j,:,:,:] = images[indx_curr_label[j]]
print(images_show.shape)
images_show = (images_show/255.0-0.5)/0.5
images_show = torch.from_numpy(images_show)
save_image(images_show.data, save_images_folder +'/real_images_grid_{}x{}.png'.format(nrow, ncol), nrow=ncol, normalize=True)
# for each age, take no more than args.max_num_img_per_label images
image_num_threshold = args.max_num_img_per_label
print("\n Original set has {} images; For each age, take no more than {} images>>>".format(len(images), image_num_threshold))
unique_labels_tmp = np.sort(np.array(list(set(labels))))
for i in tqdm(range(len(unique_labels_tmp))):
indx_i = np.where(labels == unique_labels_tmp[i])[0]
if len(indx_i)>image_num_threshold:
np.random.shuffle(indx_i)
indx_i = indx_i[0:image_num_threshold]
if i == 0:
sel_indx = indx_i
else:
sel_indx = np.concatenate((sel_indx, indx_i))
images = images[sel_indx]
labels = labels[sel_indx]
print("{} images left.".format(len(images)))
hist_filename = wd + "/histogram_before_replica_unnormalized_age_" + str(args.img_size) + 'x' + str(args.img_size)
num_bins = len(list(set(labels)))
plt.figure()
plt.hist(labels, num_bins, facecolor='blue', density=False)
plt.savefig(hist_filename)
## replicate minority samples to alleviate the imbalance
max_num_img_per_label_after_replica = np.min([args.max_num_img_per_label_after_replica, args.max_num_img_per_label])
if max_num_img_per_label_after_replica>1:
unique_labels_replica = np.sort(np.array(list(set(labels))))
num_labels_replicated = 0
print("Start replicating monority samples >>>")
for i in tqdm(range(len(unique_labels_replica))):
# print((i, num_labels_replicated))
curr_label = unique_labels_replica[i]
indx_i = np.where(labels == curr_label)[0]
if len(indx_i) < max_num_img_per_label_after_replica:
num_img_less = max_num_img_per_label_after_replica - len(indx_i)
indx_replica = np.random.choice(indx_i, size = num_img_less, replace=True)
if num_labels_replicated == 0:
images_replica = images[indx_replica]
labels_replica = labels[indx_replica]
else:
images_replica = np.concatenate((images_replica, images[indx_replica]), axis=0)
labels_replica = np.concatenate((labels_replica, labels[indx_replica]))
num_labels_replicated+=1
#end for i
images = np.concatenate((images, images_replica), axis=0)
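# --- Illustrative aside (not part of the original script) -------------------
# The replication loop above oversamples under-represented ages by drawing
# indices with replacement until each label reaches a target count. A minimal,
# self-contained sketch of the same idea on toy data; all names below are
# hypothetical and do not come from the repository:
def replicate_minority(images, labels, target_per_label, rng=None):
    rng = np.random.default_rng(rng)
    extra_idx = []
    for lab in np.unique(labels):
        idx = np.where(labels == lab)[0]
        shortfall = target_per_label - len(idx)
        if shortfall > 0:
            # draw with replacement from the existing samples of this label
            extra_idx.append(rng.choice(idx, size=shortfall, replace=True))
    if extra_idx:
        extra_idx = np.concatenate(extra_idx)
        images = np.concatenate([images, images[extra_idx]], axis=0)
        labels = np.concatenate([labels, labels[extra_idx]])
    return images, labels
# e.g. images, labels = replicate_minority(images, labels, target_per_label=200)
# -----------------------------------------------------------------------------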
# -*- coding: utf-8 -*-
#%% Import required libraries
import numpy as np
import numpy.ma as ma
import random
import math
from initialise import Initialise_model, Initialise_inputs
#%% Core model stochastic script
def Stochastic_Process(j):
Profile, num_profiles = Initialise_model(j)
peak_enlarg, mu_peak, s_peak, Year_behaviour, User_list = Initialise_inputs(j)
'''
Calculation of the peak time range, which is used to discriminate between off-peak and on-peak coincident switch-on probability.
First, the overall Peak Window is calculated, taking into account all User classes.
The peak window is simply a time window in which coincident switch-on of multiple appliances has a higher probability than off-peak.
Within the peak window, a random peak time is drawn and then enlarged into a peak_time_range, again following a random procedure.
'''
windows_curve = np.zeros(1440) #creates an empty daily profile
Tot_curve = np.zeros(1440) #creates another empty daily profile
for Us in User_list:
App_count = 0
for App in Us.App_list:
#Calculate the windows curve, i.e. the theoretical maximum curve that can be obtained, for each App, by always switching on all 'n' apps together in any time-step of the functioning windows
single_wcurve = Us.App_list[App_count].daily_use*np.mean(Us.App_list[App_count].POWER)*Us.App_list[App_count].number #this computes the curve for the specific App
windows_curve = np.vstack([windows_curve, single_wcurve]) #this stacks the specific App curve in an overall curve comprising all the Apps within a User class
App_count += 1
Us.windows_curve = windows_curve #after having iterated for all the Apps within a User class, saves the overall User class theoretical maximum curve
Us.windows_curve = np.transpose(np.sum(Us.windows_curve, axis = 0))*Us.num_users
Tot_curve = Tot_curve + Us.windows_curve #adds the User's theoretical max profile to the total theoretical max comprising all classes
peak_window = np.transpose(np.argwhere(Tot_curve == np.amax(Tot_curve))) #Find the peak window within the theoretical max profile
peak_time = round(random.normalvariate(round(np.average(peak_window)),1/3*(peak_window[0,-1]-peak_window[0,0]))) #Within the peak_window, randomly calculate the peak_time using a gaussian distribution
peak_time_range = np.arange((peak_time-round(math.fabs(peak_time-(random.gauss(peak_time,(peak_enlarg*peak_time)))))),(peak_time+round(math.fabs(peak_time-random.gauss(peak_time,(peak_enlarg*peak_time)))))) #the peak_time is randomly enlarged based on the calibration parameter peak_enlarg
'''
The core stochastic process starts here. For each profile requested by the software user,
each Appliance instance within each User instance is separately and stochastically generated
'''
for prof_i in range(num_profiles): #the whole code is repeated for each profile that needs to be generated
Tot_Classes = np.zeros(1440) #initialise an empty daily profile that will be filled with the sum of the hourly profiles of each User instance
for Us in User_list: #iterates for each User instance (i.e. for each user class)
Us.load = np.zeros(1440) #initialise empty load for User instance
for i in range(Us.num_users): #iterates for every single user within a User class. Each single user has its own separate randomisation
if Us.user_preference == 0:
rand_daily_pref = 0
pass
else:
rand_daily_pref = random.randint(1,Us.user_preference)
for App in Us.App_list: #iterates for all the App types in the given User class
#initialises variables for the cycle
tot_time = 0
App.daily_use = np.zeros(1440)
if random.uniform(0,1) > App.occasional_use: #evaluates if occasional use happens or not
continue
else:
pass
if App.Pref_index == 0:
pass
else:
if rand_daily_pref == App.Pref_index: #evaluates if daily preference coincides with the randomised daily preference number
pass
else:
continue
if App.wd_we == Year_behaviour[prof_i] or App.wd_we == 2 : #checks if the app is allowed in the given yearly behaviour pattern
pass
else:
continue
#recalculate windows start and ending times randomly, based on the inputs
rand_window_1 = np.array([int(random.uniform((App.window_1[0]-App.random_var_1),(App.window_1[0]+App.random_var_1))),int(random.uniform((App.window_1[1]-App.random_var_1),(App.window_1[1]+App.random_var_1)))])
if rand_window_1[0] < 0:
rand_window_1[0] = 0
if rand_window_1[1] > 1440:
rand_window_1[1] = 1440
rand_window_2 = np.array([int(random.uniform((App.window_2[0]-App.random_var_2),(App.window_2[0]+App.random_var_2))),int(random.uniform((App.window_2[1]-App.random_var_2),(App.window_2[1]+App.random_var_2)))])
if rand_window_2[0] < 0:
rand_window_2[0] = 0
if rand_window_2[1] > 1440:
rand_window_2[1] = 1440
rand_window_3 = np.array([int(random.uniform((App.window_3[0]-App.random_var_3),(App.window_3[0]+App.random_var_3))),int(random.uniform((App.window_3[1]-App.random_var_3),(App.window_3[1]+App.random_var_3)))])
if rand_window_3[0] < 0:
rand_window_3[0] = 0
if rand_window_3[1] > 1440:
rand_window_3[1] = 1440
#redefines functioning windows based on the previous randomisation of the boundaries
if App.flat == 'yes': #if the app is "flat" the code stops right after filling the newly created windows without applying any further stochasticity
App.daily_use[rand_window_1[0]:rand_window_1[1]] = np.full(np.diff(rand_window_1),App.POWER[prof_i]*App.number)
App.daily_use[rand_window_2[0]:rand_window_2[1]] = np.full(np.diff(rand_window_2),App.POWER[prof_i]*App.number)
App.daily_use[rand_window_3[0]:rand_window_3[1]] = np.full(np.diff(rand_window_3),App.POWER[prof_i]*App.number)
Us.load = Us.load + App.daily_use
continue
else: #otherwise, for "non-flat" apps it puts a mask on the newly defined windows and continues
App.daily_use[rand_window_1[0]:rand_window_1[1]] = np.full(np.diff(rand_window_1),0.001)
App.daily_use[rand_window_2[0]:rand_window_2[1]] = np.full(np.diff(rand_window_2),0.001)
App.daily_use[rand_window_3[0]:rand_window_3[1]] = np.full(np.diff(rand_window_3),0.001)
App.daily_use_masked = np.zeros_like(ma.masked_not_equal(App.daily_use,0.001))
App.power = App.POWER[prof_i]
#random variability is applied to the total functioning time and to the duration of the duty cycles, if they have been specified
random_var_t = random.uniform((1-App.r_t),(1+App.r_t))
if App.activate == 1:
App.p_11 = App.P_11*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_12 = App.P_12*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
random_cycle1 = np.concatenate(((np.ones(int(App.t_11*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_11),(np.ones(int(App.t_12*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_12))) #randomise also the fixed cycle
random_cycle2 = random_cycle1
random_cycle3 = random_cycle1
elif App.activate == 2:
App.p_11 = App.P_11*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_12 = App.P_12*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_21 = App.P_21*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_22 = App.P_22*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
random_cycle1 = np.concatenate(((np.ones(int(App.t_11*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_11),(np.ones(int(App.t_12*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_12))) #randomise also the fixed cycle
random_cycle2 = np.concatenate(((np.ones(int(App.t_21*(random.uniform((1+App.r_c2),(1-App.r_c2)))))*App.p_21),(np.ones(int(App.t_22*(random.uniform((1+App.r_c2),(1-App.r_c2)))))*App.p_22))) #randomise also the fixed cycle
random_cycle3 = random_cycle1
elif App.activate == 3:
App.p_11 = App.P_11*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_12 = App.P_12*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_21 = App.P_12*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_22 = App.P_22*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_31 = App.P_31*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
App.p_32 = App.P_32*(random.uniform((1-App.Thermal_P_var),(1+App.Thermal_P_var))) #randomly variates the power of thermal apps, otherwise variability is 0
random_cycle1 = random.choice([np.concatenate(((np.ones(int(App.t_11*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_11),(np.ones(int(App.t_12*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_12))),np.concatenate(((np.ones(int(App.t_12*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_12),(np.ones(int(App.t_11*(random.uniform((1+App.r_c1),(1-App.r_c1)))))*App.p_11)))]) #randomise also the fixed cycle
random_cycle2 = random.choice([np.concatenate(((np.ones(int(App.t_21*(random.uniform((1+App.r_c2),(1-App.r_c2)))))*App.p_21),(np.ones(int(App.t_22*(random.uniform((1+App.r_c2),(1-App.r_c2)))))*App.p_22))),np.concatenate(((np.ones(int(App.t_22*(random.uniform((1+App.r_c2),(1-App.r_c2)))))*App.p_22),(np.ones(int(App.t_21*(random.uniform((1+App.r_c2),(1-App.r_c2)))))*App.p_21)))])
random_cycle3 = random.choice([np.concatenate(((np.ones(int(App.t_31*(random.uniform((1+App.r_c3),(1-App.r_c3)))))*App.p_31),(np.ones(int(App.t_32*(random.uniform((1+App.r_c3),(1-App.r_c3)))))*App.p_32))),np.concatenate(((np.ones(int(App.t_32*(random.uniform((1+App.r_c3),(1-App.r_c3)))))*App.p_32),(np.ones(int(App.t_31*(random.uniform((1+App.r_c3),(1-App.r_c3)))))*App.p_31)))])#this is to avoid all cycles being synchronous
else:
pass
rand_time = round(random.uniform(App.func_time,int(App.func_time*random_var_t)))
#control to check that the total randomised time of use does not exceed the total space available in the windows
if rand_time > 0.99*(np.diff(rand_window_1)+np.diff(rand_window_2)+np.diff(rand_window_3)):
rand_time = int(0.99*(np.diff(rand_window_1)+np.diff(rand_window_2)+np.diff(rand_window_3)))
max_free_spot = rand_time #free spots are used to detect if there's still space for switch_ons. Before calculating actual free spots, the max free spot is set equal to the entire randomised func_time
while tot_time <= rand_time: #this is the key cycle, which runs for each App until the switch_ons and their duration equals the randomised total time of use of the App
#check how many windows to consider
if App.num_windows == 1:
switch_on = int(random.choice([random.uniform(rand_window_1[0],(rand_window_1[1]))]))
elif App.num_windows == 2:
switch_on = int(random.choice([random.uniform(rand_window_1[0],(rand_window_1[1])),random.uniform(rand_window_2[0],(rand_window_2[1]))]))
else:
switch_on = int(random.choice([random.uniform(rand_window_1[0],(rand_window_1[1])),random.uniform(rand_window_2[0],(rand_window_2[1])),random.uniform(rand_window_3[0],(rand_window_3[1]))]))
#Identifies a random switch on time within the available functioning windows
if App.daily_use[switch_on] == 0.001: #control to check if the app is not already on at the randomly selected switch-on time
if switch_on in range(rand_window_1[0],rand_window_1[1]):
if np.any(App.daily_use[switch_on:rand_window_1[1]]!=0.001): #control to check if there are any other switch on times after the current one
next_switch = [switch_on + k[0] for k in np.where(App.daily_use[switch_on:]!=0.001)] #identifies the position of next switch on time and sets it as a limit for the duration of the current switch on
if (next_switch[0] - switch_on) >= App.func_cycle and max_free_spot >= App.func_cycle:
upper_limit = min((next_switch[0]-switch_on),min(rand_time,rand_window_1[1]-switch_on))
elif (next_switch[0] - switch_on) < App.func_cycle and max_free_spot >= App.func_cycle: #if next switch_on event does not allow for a minimum functioning cycle without overlapping, but there are other larger free spots, the cycle tries again from the beginning
continue
else:
upper_limit = next_switch[0]-switch_on #if there are no other options to reach the total time of use, empty spaces are filled without minimum cycle restrictions until reaching the limit
else:
upper_limit = min(rand_time,rand_window_1[1]-switch_on) #if there are no other switch-on events after the current one, the upper duration limit is set this way
if upper_limit >= App.func_cycle: #if the upper limit is higher than minimum functioning time, an array of indexes is created to be later put in the profile
indexes = np.arange(switch_on,switch_on+(int(random.uniform(App.func_cycle,upper_limit)))) #a random duration is chosen between the upper limit and the minimum cycle
else:
indexes = np.arange(switch_on,switch_on+upper_limit) #this is the case in which empty spaces need to be filled without constraints to reach the total time goal
elif switch_on in range(rand_window_2[0],rand_window_2[1]): #if random switch_on happens in windows2, same code as above is repeated for windows2
if np.any(App.daily_use[switch_on:rand_window_2[1]]!=0.001):
next_switch = [switch_on + k[0] for k in np.where(App.daily_use[switch_on:]!=0.001)]
if (next_switch[0] - switch_on) >= App.func_cycle and max_free_spot >= App.func_cycle:
upper_limit = min((next_switch[0]-switch_on),min(rand_time,rand_window_2[1]-switch_on))
elif (next_switch[0] - switch_on) < App.func_cycle and max_free_spot >= App.func_cycle:
continue
else:
upper_limit = next_switch[0]-switch_on
else:
upper_limit = min(rand_time,rand_window_2[1]-switch_on)
if upper_limit >= App.func_cycle:
indexes = np.arange(switch_on,switch_on+(int(random.uniform(App.func_cycle,upper_limit))))
else:
indexes = np.arange(switch_on,switch_on+upper_limit)
else: #if switch_on is neither in window1 nor in window2, it must be in window3. Same code is repeated
if np.any(App.daily_use[switch_on:rand_window_3[1]]!=0.001):
next_switch = [switch_on + k[0] for k in np.where(App.daily_use[switch_on:]!=0.001)]
if (next_switch[0] - switch_on) >= App.func_cycle and max_free_spot >= App.func_cycle:
upper_limit = min((next_switch[0]-switch_on),min(rand_time,rand_window_3[1]-switch_on))
elif (next_switch[0] - switch_on) < App.func_cycle and max_free_spot >= App.func_cycle:
continue
else:
upper_limit = next_switch[0]-switch_on
else:
upper_limit = min(rand_time,rand_window_3[1]-switch_on)
if upper_limit >= App.func_cycle:
indexes = np.arange(switch_on,switch_on+(int(random.uniform(App.func_cycle,upper_limit))))
else:
indexes = np.arange(switch_on,switch_on+upper_limit)
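# --- Illustrative aside (not part of the RAMP source above) -----------------
# The while-loop above keeps drawing random switch-on minutes inside the
# allowed windows and filling runs of at least func_cycle minutes until the
# appliance reaches its randomised total time of use. A heavily simplified,
# standalone sketch of that placement logic (hypothetical names, overlap
# handling reduced to a single-minute check; relies on the module-level
# numpy and random imports):
def fill_daily_use(windows, total_on_time, min_cycle, power, day_len=1440):
    profile = np.zeros(day_len)
    on_time = 0
    attempts = 0
    while on_time < total_on_time and attempts < 10000:
        attempts += 1
        w_start, w_end = random.choice(windows)
        start = random.randint(w_start, w_end - 1)
        if profile[start] != 0:  # already on at this minute, try again
            continue
        upper = min(w_end - start, total_on_time - on_time + min_cycle)
        duration = random.randint(min(min_cycle, upper), upper)
        profile[start:start + duration] = power
        on_time += duration
    return profile
# e.g. fill_daily_use([(420, 540), (1080, 1320)], total_on_time=120, min_cycle=10, power=1000.0)
# -----------------------------------------------------------------------------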
"""Tests for neurodsp.sim.aperiodic."""
import numpy as np
from scipy.optimize import curve_fit
from neurodsp.tests.settings import N_SECONDS, FS, EXP1, EXP2, KNEE, EPS
from neurodsp.tests.tutils import check_sim_output
from neurodsp.sim.aperiodic import *
from neurodsp.sim.aperiodic import _create_powerlaw
from neurodsp.spectral import compute_spectrum
###################################################################################################
###################################################################################################
def test_sim_poisson_pop():
sig = sim_poisson_pop(N_SECONDS, FS)
check_sim_output(sig)
def test_sim_synaptic_current():
sig = sim_synaptic_current(N_SECONDS, FS)
check_sim_output(sig)
def test_sim_knee():
# Build the signal and run a smoke test
sig = sim_knee(N_SECONDS, FS, EXP1, EXP2, KNEE)
check_sim_output(sig)
# Check against the power spectrum when you take the Fourier transform
sig_len = int(FS*N_SECONDS)
freqs = np.linspace(0, FS/2, num=sig_len//2, endpoint=True)
# Ignore the DC component to avoid division by zero in the Lorentzian
freqs = freqs[1:]
true_psd = np.array([1/(freq**-EXP1*(freq**(-EXP2-EXP1) + KNEE)) for freq in freqs])
# Only look at the frequencies (ignoring DC component) up to the nyquist rate
sig_hat = np.fft.fft(sig)[1:sig_len//2]
numerical_psd = np.abs(sig_hat)**2
np.allclose(true_psd, numerical_psd, atol=EPS)
# Accuracy test for a single exponent
sig = sim_knee(n_seconds=N_SECONDS, fs=FS, chi1=0, chi2=EXP2, knee=KNEE)
freqs, powers = compute_spectrum(sig, FS, f_range=(1, 200))
def _estimate_single_knee(xs, offset, knee, exponent):
return np.zeros_like(xs)
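# --- Illustrative aside (not part of the neurodsp test file) ----------------
# _estimate_single_knee above is truncated at the dataset boundary. A common
# way to recover aperiodic parameters from a simulated knee spectrum is to fit
# a single-knee model in log10 power with scipy's curve_fit. The model form
# below is an assumption for illustration, not necessarily the exact helper
# used by the test:
def _fit_single_knee_example(freqs, powers):
    def log_psd_model(xs, offset, knee, exponent):
        # single-knee aperiodic model: offset - log10(knee + f**exponent)
        return offset - np.log10(knee + xs**exponent)
    popt, _ = curve_fit(log_psd_model, freqs, np.log10(powers), p0=(0., 1., 2.))
    return popt  # (offset, knee, exponent)
# -----------------------------------------------------------------------------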
"""
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}
# redefine this here to avoid a circular import from yt.funcs
def iterable(obj):
try: len(obj)
except: return False
return True
def return_arr(func):
@wraps(func)
def wrapped(*args, **kwargs):
ret, units = func(*args, **kwargs)
if ret.shape == ():
return YTQuantity(ret, units)
else:
# This could be a subclass, so don't call YTArray directly.
return type(args[0])(ret, units)
return wrapped
@lru_cache(maxsize=128, typed=False)
def sqrt_unit(unit):
return unit**0.5
@lru_cache(maxsize=128, typed=False)
def multiply_units(unit1, unit2):
return unit1 * unit2
def preserve_units(unit1, unit2=None):
return unit1
@lru_cache(maxsize=128, typed=False)
def power_unit(unit, power):
return unit**power
@lru_cache(maxsize=128, typed=False)
def square_unit(unit):
return unit*unit
@lru_cache(maxsize=128, typed=False)
def divide_units(unit1, unit2):
return unit1/unit2
@lru_cache(maxsize=128, typed=False)
def reciprocal_unit(unit):
return unit**-1
def passthrough_unit(unit, unit2=None):
return unit
def return_without_unit(unit, unit2=None):
return None
def arctan2_unit(unit1, unit2):
return NULL_UNIT
def comparison_unit(unit1, unit2=None):
return None
def invert_units(unit):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
inp = inputs[0]
u = getattr(inp, 'units', None)
if u is None:
u = NULL_UNIT
if u.dimensions is angle and ufunc in trigonometric_operators:
inp = inp.in_units('radian').v
if out_arr is not None:
out_arr = ufunc(inp).view(np.ndarray)
return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
inp1 = coerce_iterable_units(inputs[0])
inp2 = coerce_iterable_units(inputs[1])
unit1 = getattr(inp1, 'units', None)
unit2 = getattr(inp2, 'units', None)
ret_class = get_binary_op_return_class(type(inp1), type(inp2))
if unit1 is None:
unit1 = Unit(registry=getattr(unit2, 'registry', None))
if unit2 is None and ufunc is not power:
unit2 = Unit(registry=getattr(unit1, 'registry', None))
elif ufunc is power:
unit2 = inp2
if isinstance(unit2, np.ndarray):
if isinstance(unit2, YTArray):
if unit2.units.is_dimensionless:
pass
else:
raise YTUnitOperationError(ufunc, unit1, unit2)
unit2 = 1.0
return (inp1, inp2), (unit1, unit2), ret_class
def handle_preserve_units(inps, units, ufunc, ret_class):
if units[0] != units[1]:
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
else:
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_multiply_divide_units(unit, units, out, out_arr):
if unit.is_dimensionless and unit.base_value != 1.0:
if not units[0].is_dimensionless:
if units[0].dimensions == units[1].dimensions:
out_arr = np.multiply(out_arr.view(np.ndarray),
unit.base_value, out=out)
unit = Unit(registry=unit.registry)
return out, out_arr, unit
def coerce_iterable_units(input_object):
if isinstance(input_object, np.ndarray):
return input_object
if iterable(input_object):
if any([isinstance(o, YTArray) for o in input_object]):
ff = getattr(input_object[0], 'units', NULL_UNIT)
if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
raise YTIterableUnitCoercionError(input_object)
# This will create a copy of the data in the iterable.
return YTArray(input_object)
return input_object
else:
return input_object
def sanitize_units_mul(this_object, other_object):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# If the other object is a YTArray and has the same dimensions as the object
# under consideration, convert so we don't mix units with the same
# dimensions.
if isinstance(ret, YTArray):
if inp.units.same_dimensions_as(ret.units):
ret.in_units(inp.units)
return ret
def sanitize_units_add(this_object, other_object, op_string):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# Make sure the other object is a YTArray before we use the `units`
# attribute.
if isinstance(ret, YTArray):
if not inp.units.same_dimensions_as(ret.units):
# handle special case of adding or subtracting with zero or
# array filled with zero
if not np.any(other_object):
return ret.view(np.ndarray)
elif not np.any(this_object):
return ret
raise YTUnitOperationError(op_string, inp.units, ret.units)
ret = ret.in_units(inp.units)
else:
# If the other object is not a YTArray, then one of the arrays must be
# dimensionless or filled with zeros
if not inp.units.is_dimensionless and np.any(ret):
raise YTUnitOperationError(op_string, inp.units, dimensionless)
return ret
def validate_comparison_units(this, other, op_string):
# Check that other is a YTArray.
if hasattr(other, 'units'):
if this.units.expr is other.units.expr:
if this.units.base_value == other.units.base_value:
return other
if not this.units.same_dimensions_as(other.units):
raise YTUnitOperationError(op_string, this.units, other.units)
return other.in_units(this.units)
return other
@lru_cache(maxsize=128, typed=False)
def _unit_repr_check_same(my_units, other_units):
"""
Takes a Unit object, or a string of a known unit symbol, and checks that it
is compatible with this quantity. Returns a Unit object.
"""
# let Unit() handle units arg if it's not already a Unit obj.
if not isinstance(other_units, Unit):
other_units = Unit(other_units, registry=my_units.registry)
equiv_dims = em_dimensions.get(my_units.dimensions, None)
if equiv_dims == other_units.dimensions:
if current_mks in equiv_dims.free_symbols:
base = "SI"
else:
base = "CGS"
raise YTEquivalentDimsError(my_units, other_units, base)
if not my_units.same_dimensions_as(other_units):
raise YTUnitConversionError(
my_units, my_units.dimensions, other_units, other_units.dimensions)
return other_units
unary_operators = (
negative, absolute, rint, sign, conj, exp, exp2, log, log2,
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
binary_operators = (
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
left_shift, right_shift, greater, greater_equal, less, less_equal,
not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
trigonometric_operators = (
sin, cos, tan,
)
class YTArray(np.ndarray):
"""
An ndarray subclass that attaches a symbolic unit object to the array data.
Parameters
----------
input_array : :obj:`!iterable`
A tuple, list, or array to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the array. Powers must be specified using python
syntax (cm**3, not cm^3).
registry : ~yt.units.unit_registry.UnitRegistry
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data. Defaults to the dtype of the input data,
or, if none is found, uses np.float64
bypass_validation : boolean
If True, all input validation is skipped. Using this option may produce
corrupted, invalid units or array data, but can lead to significant
speedups when the input validation logic adds significant overhead. If set,
input_units *must* be a valid unit object. Defaults to False.
Examples
--------
>>> from yt import YTArray
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = YTArray([4, 5, 6], 'm')
>>> a + b
YTArray([ 401., 502., 603.]) cm
>>> b + a
YTArray([ 4.01, 5.02, 6.03]) m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTArray(np.arange(8) - 4, 'g/cm**3')
>>> np.abs(a)
YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3
and strip them when it would be annoying to deal with them.
>>> np.log10(a)
array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999,
0.69897 , 0.77815125, 0.84509804])
YTArray is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.arr(np.ones(5), 'code_length')
>>> a.in_cgs()
YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24,
3.08600000e+24, 3.08600000e+24]) cm
This is equivalent to:
>>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
_ufunc_registry = {
add: preserve_units,
subtract: preserve_units,
multiply: multiply_units,
divide: divide_units,
logaddexp: return_without_unit,
logaddexp2: return_without_unit,
true_divide: divide_units,
floor_divide: divide_units,
negative: passthrough_unit,
power: power_unit,
remainder: preserve_units,
mod: preserve_units,
fmod: preserve_units,
absolute: passthrough_unit,
fabs: passthrough_unit,
rint: return_without_unit,
sign: return_without_unit,
conj: passthrough_unit,
exp: return_without_unit,
exp2: return_without_unit,
log: return_without_unit,
log2: return_without_unit,
log10: return_without_unit,
expm1: return_without_unit,
log1p: return_without_unit,
sqrt: sqrt_unit,
square: square_unit,
reciprocal: reciprocal_unit,
sin: return_without_unit,
cos: return_without_unit,
tan: return_without_unit,
sinh: return_without_unit,
cosh: return_without_unit,
tanh: return_without_unit,
arcsin: return_without_unit,
arccos: return_without_unit,
arctan: return_without_unit,
arctan2: arctan2_unit,
arcsinh: return_without_unit,
arccosh: return_without_unit,
arctanh: return_without_unit,
hypot: preserve_units,
deg2rad: return_without_unit,
rad2deg: return_without_unit,
bitwise_and: bitop_units,
bitwise_or: bitop_units,
bitwise_xor: bitop_units,
invert: invert_units,
left_shift: bitop_units,
right_shift: bitop_units,
greater: comparison_unit,
greater_equal: comparison_unit,
less: comparison_unit,
less_equal: comparison_unit,
not_equal: comparison_unit,
equal: comparison_unit,
logical_and: comparison_unit,
logical_or: comparison_unit,
logical_xor: comparison_unit,
logical_not: return_without_unit,
maximum: preserve_units,
minimum: preserve_units,
fmax: preserve_units,
fmin: preserve_units,
isreal: return_without_unit,
iscomplex: return_without_unit,
isfinite: return_without_unit,
isinf: return_without_unit,
isnan: return_without_unit,
signbit: return_without_unit,
copysign: passthrough_unit,
nextafter: preserve_units,
modf: passthrough_unit,
ldexp: bitop_units,
frexp: return_without_unit,
floor: passthrough_unit,
ceil: passthrough_unit,
trunc: passthrough_unit,
spacing: passthrough_unit,
positive: passthrough_unit,
divmod_: passthrough_unit,
isnat: return_without_unit,
heaviside: preserve_units,
}
__array_priority__ = 2.0
def __new__(cls, input_array, input_units=None, registry=None, dtype=None,
bypass_validation=False):
if dtype is None:
dtype = getattr(input_array, 'dtype', np.float64)
if bypass_validation is True:
obj = np.asarray(input_array, dtype=dtype).view(cls)
obj.units = input_units
if registry is not None:
obj.units.registry = registry
return obj
if input_array is NotImplemented:
return input_array.view(cls)
if registry is None and isinstance(input_units, (str, bytes)):
if input_units.startswith('code_'):
raise UnitParseError(
"Code units used without referring to a dataset. \n"
"Perhaps you meant to do something like this instead: \n"
"ds.arr(%s, \"%s\")" % (input_array, input_units)
)
if isinstance(input_array, YTArray):
ret = input_array.view(cls)
if input_units is None:
if registry is None:
ret.units = input_array.units
else:
units = Unit(str(input_array.units), registry=registry)
ret.units = units
elif isinstance(input_units, Unit):
ret.units = input_units
else:
ret.units = Unit(input_units, registry=registry)
return ret
elif isinstance(input_array, np.ndarray):
pass
elif iterable(input_array) and input_array:
if isinstance(input_array[0], YTArray):
return YTArray(np.array(input_array, dtype=dtype),
input_array[0].units, registry=registry)
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array, dtype=dtype).view(cls)
# Check units type
if input_units is None:
# Nothing provided. Make dimensionless...
units = Unit()
elif isinstance(input_units, Unit):
if registry and registry is not input_units.registry:
units = Unit(str(input_units), registry=registry)
else:
units = input_units
else:
# units kwarg set, but it's not a Unit object.
# don't handle all the cases here, let the Unit class handle if
# it's a str.
units = Unit(input_units, registry=registry)
# Attach the units
obj.units = units
return obj
def __repr__(self):
"""
"""
return super(YTArray, self).__repr__()+' '+self.units.__repr__()
def __str__(self):
"""
"""
return str(self.view(np.ndarray)) + ' ' + str(self.units)
#
# Start unit conversion methods
#
def convert_to_units(self, units):
"""
Convert the array and units to the given units.
Parameters
----------
units : Unit object or str
The units you want to convert to.
"""
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
self.units = new_units
values = self.d
values *= conversion_factor
if offset:
np.subtract(self, offset*self.uq, self)
return self
def convert_to_base(self, unit_system="cgs"):
"""
Convert the array and units to the equivalent base units in
the specified unit system.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E.convert_to_base(unit_system="galactic")
"""
return self.convert_to_units(self.units.get_base_equivalent(unit_system))
def convert_to_cgs(self):
"""
Convert the array and units to the equivalent cgs units.
"""
return self.convert_to_units(self.units.get_cgs_equivalent())
def convert_to_mks(self):
"""
Convert the array and units to the equivalent mks units.
"""
return self.convert_to_units(self.units.get_mks_equivalent())
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
YTArray
"""
if equivalence is None:
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
new_array = type(self)(self.ndview * conversion_factor, new_units)
if offset:
np.subtract(new_array, offset*new_array.uq, new_array)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs)
def to(self, units, equivalence=None, **kwargs):
"""
An alias for YTArray.in_units().
See the docstrings of that function for details.
"""
return self.in_units(units, equivalence=equivalence, **kwargs)
def to_value(self, units=None, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it without units. Output is therefore a
bare NumPy array.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string, optional
The units you want to get the bare quantity in. If not
specified, the value will be returned in the current units.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
NumPy array
"""
if units is None:
v = self.value
else:
v = self.in_units(units, equivalence=equivalence, **kwargs).value
if isinstance(self, YTQuantity):
return float(v)
else:
return v
def in_base(self, unit_system="cgs"):
"""
Creates a copy of this array with the data in the specified unit system,
and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E_new = E.in_base(unit_system="galactic")
"""
return self.in_units(self.units.get_base_equivalent(unit_system))
def in_cgs(self):
"""
Creates a copy of this array with the data in the equivalent cgs units,
and returns it.
Returns
-------
Quantity object with data converted to cgs units.
"""
return self.in_units(self.units.get_cgs_equivalent())
def in_mks(self):
"""
Creates a copy of this array with the data in the equivalent mks units,
and returns it.
Returns
-------
Quantity object with data converted to mks units.
"""
return self.in_units(self.units.get_mks_equivalent())
def to_equivalent(self, unit, equiv, **kwargs):
"""
Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
related by only a constant factor but not in the same units.
Parameters
----------
unit : string
The unit that you wish to convert to.
equiv : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> a = yt.YTArray(1.0e7,"K")
>>> a.to_equivalent("keV", "thermal")
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equiv]()
oneway_or_equivalent = (
conv_unit.has_equivalent(equiv) or this_equiv._one_way)
if self.has_equivalent(equiv) and oneway_or_equivalent:
new_arr = this_equiv.convert(
self, conv_unit.dimensions, **kwargs)
if isinstance(new_arr, tuple):
try:
return type(self)(new_arr[0], new_arr[1]).in_units(unit)
except YTUnitConversionError:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
else:
return new_arr.in_units(unit)
else:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
def list_equivalencies(self):
"""
Lists the possible equivalencies associated with this YTArray or
YTQuantity.
"""
self.units.list_equivalencies()
def has_equivalent(self, equiv):
"""
Check to see if this YTArray or YTQuantity has an equivalent unit in
*equiv*.
"""
return self.units.has_equivalent(equiv)
def ndarray_view(self):
"""
Returns a view into the array, but as an ndarray rather than ytarray.
Returns
-------
View of this array's data.
"""
return self.view(np.ndarray)
def to_ndarray(self):
"""
Creates a copy of this array with the unit information stripped
"""
return np.array(self)
@classmethod
def from_astropy(cls, arr, unit_registry=None):
"""
Convert an AstroPy "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : AstroPy Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
Convert a YTArray or YTQuantity to a Pint Quantity.
Parameters
----------
arr : YTArray or YTQuantity
The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
The filename of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
#
# Start convenience methods
#
@property
def value(self):
"""Get a copy of the array data as a numpy ndarray"""
return np.array(self)
v = value
@property
def ndview(self):
"""Get a view of the array data."""
return self.ndarray_view()
d = ndview
@property
def unit_quantity(self):
"""Get a YTQuantity with the same unit as this array and a value of
1.0"""
return YTQuantity(1.0, self.units)
uq = unit_quantity
@property
def unit_array(self):
"""Get a YTArray filled with ones with the same unit and shape as this
array"""
return np.ones_like(self)
ua = unit_array
def __getitem__(self, item):
ret = super(YTArray, self).__getitem__(item)
if ret.shape == ():
return YTQuantity(ret, self.units, bypass_validation=True)
else:
if hasattr(self, 'units'):
ret.units = self.units
return ret
#
# Start operation methods
#
if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
def __add__(self, right_object):
"""
Add this ytarray to the object on the right of the `+` operator.
Must check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "addition")
return super(YTArray, self).__add__(ro)
def __radd__(self, left_object):
""" See __add__. """
lo = sanitize_units_add(self, left_object, "addition")
return super(YTArray, self).__radd__(lo)
def __iadd__(self, other):
""" See __add__. """
oth = sanitize_units_add(self, other, "addition")
np.add(self, oth, out=self)
return self
def __sub__(self, right_object):
"""
Subtract the object on the right of the `-` from this ytarray. Must
check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "subtraction")
return super(YTArray, self).__sub__(ro)
def __rsub__(self, left_object):
""" See __sub__. """
lo = sanitize_units_add(self, left_object, "subtraction")
return super(YTArray, self).__rsub__(lo)
def __isub__(self, other):
""" See __sub__. """
oth = sanitize_units_add(self, other, "subtraction")
np.subtract(self, oth, out=self)
return self
def __neg__(self):
""" Negate the data. """
return super(YTArray, self).__neg__()
def __mul__(self, right_object):
"""
Multiply this YTArray by the object on the right of the `*`
operator. The unit objects handle being multiplied.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__mul__(ro)
def __rmul__(self, left_object):
""" See __mul__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rmul__(lo)
def __imul__(self, other):
""" See __mul__. """
oth = sanitize_units_mul(self, other)
np.multiply(self, oth, out=self)
return self
def __div__(self, right_object):
"""
Divide this YTArray by the object on the right of the `/` operator.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__div__(ro)
def __rdiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rdiv__(lo)
def __idiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.divide(self, oth, out=self)
return self
def __truediv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__truediv__(ro)
def __rtruediv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rtruediv__(lo)
def __itruediv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.true_divide(self, oth, out=self)
return self
def __floordiv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__floordiv__(ro)
def __rfloordiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rfloordiv__(lo)
def __ifloordiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.floor_divide(self, oth, out=self)
return self
def __or__(self, right_object):
return super(YTArray, self).__or__(right_object)
def __ror__(self, left_object):
return super(YTArray, self).__ror__(left_object)
def __ior__(self, other):
np.bitwise_or(self, other, out=self)
return self
def __xor__(self, right_object):
return super(YTArray, self).__xor__(right_object)
def __rxor__(self, left_object):
return super(YTArray, self).__rxor__(left_object)
def __ixor__(self, other):
np.bitwise_xor(self, other, out=self)
return self
def __and__(self, right_object):
return super(YTArray, self).__and__(right_object)
def __rand__(self, left_object):
return super(YTArray, self).__rand__(left_object)
def __iand__(self, other):
np.bitwise_and(self, other, out=self)
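# --- Illustrative aside (not part of yt) -------------------------------------
# The _ufunc_registry above pairs each numpy ufunc with a small helper that
# decides the units of the result (multiply_units, divide_units,
# preserve_units, ...). A minimal standalone sketch of that dispatch pattern,
# using plain strings in place of yt Unit objects:
TOY_UNIT_RULES = {
    np.multiply: lambda u1, u2: "(%s)*(%s)" % (u1, u2),
    np.divide: lambda u1, u2: "(%s)/(%s)" % (u1, u2),
    np.sqrt: lambda u1, u2=None: "(%s)**0.5" % u1,
}

def apply_with_units(ufunc, values, units, other=None, other_units=None):
    # compute the raw ndarray result, then look up the output unit string
    raw = ufunc(values) if other is None else ufunc(values, other)
    return raw, TOY_UNIT_RULES[ufunc](units, other_units)
# e.g. apply_with_units(np.multiply, np.array([1.0, 2.0]), "cm",
#                       np.array([3.0, 4.0]), "g")  ->  (array([3., 8.]), '(cm)*(g)')
# -----------------------------------------------------------------------------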
import random
import numpy as np
import pandas as pd
import pytest
from mizarlabs import static
from mizarlabs.transformers.microstructural_features.second_generation import (
AmihudLambda,
)
from mizarlabs.transformers.microstructural_features.second_generation import (
HasbrouckLambda,
)
from mizarlabs.transformers.microstructural_features.second_generation import KyleLambda
@pytest.mark.parametrize("window", random.choices(range(10, 40), k=3))
@pytest.mark.usefixtures("dollar_bar_dataframe")
def test_kyle_lambda(dollar_bar_dataframe: pd.DataFrame, window: int):
"""
Checks the following:
1) if the no. of NaNs is as expected, i.e. equal to the window.
2) if all values after the warming up period are computed.
3) if all values are positive, as Kyle's Lambda is always positive.
"""
kyle_lambda_transformer = KyleLambda(window)
kyle_lambda_transformer_values = kyle_lambda_transformer.transform(
dollar_bar_dataframe[[static.CLOSE, static.BASE_ASSET_VOLUME]].astype(float)
)
assert np.isnan(kyle_lambda_transformer_values).sum() == window
assert np.isnan(kyle_lambda_transformer_values[:window])
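# --- Illustrative aside (not mizarlabs code) ---------------------------------
# The KyleLambda transformer exercised above estimates price impact per unit
# of traded volume over a rolling window. One common bar-based estimator --
# an assumption here, the library's exact formula may differ -- divides the
# absolute close-to-close price change by the bar's volume and smooths it
# with a rolling mean. With this form, the first `window` values are NaN,
# matching the warm-up check in the test:
def rolling_kyle_lambda(close: pd.Series, volume: pd.Series, window: int) -> pd.Series:
    price_impact = close.diff().abs() / volume
    return price_impact.rolling(window).mean()
# e.g. rolling_kyle_lambda(bars["close"], bars["base_asset_volume"], window=20)
# -----------------------------------------------------------------------------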
import numpy as np
import pytest
from scipy.stats import (bootstrap, BootstrapDegenerateDistributionWarning,
monte_carlo_test, permutation_test)
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from scipy import stats
from scipy import special
from .. import _resampling as _resampling
from scipy._lib._util import rng_integers
from scipy.optimize import root
def test_bootstrap_iv():
message = "`data` must be a sequence of samples."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean)
message = "`data` must contain at least one sample."
with pytest.raises(ValueError, match=message):
bootstrap(tuple(), np.mean)
message = "each sample in `data` must contain two or more observations..."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1]), np.mean)
message = ("When `paired is True`, all samples must have the same length ")
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)
message = "`vectorized` must be `True` or `False`."
with pytest.raises(ValueError, match=message):
bootstrap(1, np.mean, vectorized='ekki')
message = "`axis` must be an integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, axis=1.5)
message = "could not convert string to float"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)
message = "`n_resamples` must be a positive integer."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=-1000)
message = "`batch` must be a positive integer or None."
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, batch=1000.5)
message = "`method` must be in"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, method='ekki')
message = "`method = 'BCa' is only available for one-sample statistics"
def statistic(x, y, axis):
mean1 = np.mean(x, axis)
mean2 = np.mean(y, axis)
return mean1 - mean2
with pytest.raises(ValueError, match=message):
bootstrap(([.1, .2, .3], [.1, .2, .3]), statistic, method='BCa')
message = "'herring' cannot be used to seed a"
with pytest.raises(ValueError, match=message):
bootstrap(([1, 2, 3],), np.mean, random_state='herring')
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_bootstrap_batch(method, axis):
# for one-sample statistics, batch size shouldn't affect the result
np.random.seed(0)
x = np.random.rand(10, 11, 12)
res1 = bootstrap((x,), np.mean, batch=None, method=method,
random_state=0, axis=axis, n_resamples=100)
res2 = bootstrap((x,), np.mean, batch=10, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
assert_equal(res2.standard_error, res1.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_paired(method):
# test that `paired` works as expected
np.random.seed(0)
n = 100
x = np.random.rand(n)
y = np.random.rand(n)
def my_statistic(x, y, axis=-1):
return ((x-y)**2).mean(axis=axis)
def my_paired_statistic(i, axis=-1):
a = x[i]
b = y[i]
res = my_statistic(a, b)
return res
i = np.arange(len(x))
res1 = bootstrap((i,), my_paired_statistic, random_state=0)
res2 = bootstrap((x, y), my_statistic, paired=True, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("paired", [True, False])
def test_bootstrap_vectorized(method, axis, paired):
# test that paired is vectorized as expected: when samples are tiled,
# CI and standard_error of each axis-slice is the same as those of the
# original 1d sample
if not paired and method == 'BCa':
# should re-assess when BCa is extended
pytest.xfail(reason="BCa currently for 1-sample statistics only")
np.random.seed(0)
def my_statistic(x, y, z, axis=-1):
return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)
shape = 10, 11, 12
n_samples = shape[axis]
x = np.random.rand(n_samples)
y = np.random.rand(n_samples)
z = np.random.rand(n_samples)
res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=0, n_resamples=100)
reshape = [1, 1, 1]
reshape[axis] = n_samples
x = np.broadcast_to(x.reshape(reshape), shape)
y = np.broadcast_to(y.reshape(reshape), shape)
z = np.broadcast_to(z.reshape(reshape), shape)
res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
random_state=0, axis=axis, n_resamples=100)
assert_allclose(res2.confidence_interval.low,
res1.confidence_interval.low)
assert_allclose(res2.confidence_interval.high,
res1.confidence_interval.high)
assert_allclose(res2.standard_error, res1.standard_error)
result_shape = list(shape)
result_shape.pop(axis)
assert_equal(res2.confidence_interval.low.shape, result_shape)
assert_equal(res2.confidence_interval.high.shape, result_shape)
assert_equal(res2.standard_error.shape, result_shape)
@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
def test_bootstrap_against_theory(method):
# based on https://www.statology.org/confidence-intervals-python/
data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=0)
alpha = 0.95
dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
expected_interval = dist.interval(confidence=alpha)
expected_se = dist.std()
res = bootstrap((data,), np.mean, n_resamples=5000,
confidence_level=alpha, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
assert_allclose(res.standard_error, expected_se, atol=3e-4)
tests_R = {"basic": (23.77, 79.12),
"percentile": (28.86, 84.21),
"BCa": (32.31, 91.43)}
@pytest.mark.parametrize("method, expected", tests_R.items())
def test_bootstrap_against_R(method, expected):
# Compare against R's "boot" library
# library(boot)
# stat <- function (x, a) {
# mean(x[a])
# }
# x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
# 23, 34, 50, 81, 89, 121, 134, 213)
# # Use a large value so we get a few significant digits for the CI.
# n = 1000000
# bootresult = boot(x, stat, n)
# result <- boot.ci(bootresult)
# print(result)
x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
23, 34, 50, 81, 89, 121, 134, 213])
res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
random_state=0)
assert_allclose(res.confidence_interval, expected, rtol=0.005)
tests_against_itself_1samp = {"basic": 1780,
"percentile": 1784,
"BCa": 1784}
@pytest.mark.parametrize("method, expected",
tests_against_itself_1samp.items())
def test_bootstrap_against_itself_1samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n = 100 # size of sample
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The true mean is 5
dist = stats.norm(loc=5, scale=1)
stat_true = dist.mean()
# Do the same thing 2000 times. (The code is fully vectorized.)
n_replications = 2000
data = dist.rvs(size=(n_replications, n))
res = bootstrap((data,),
statistic=np.mean,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
tests_against_itself_2samp = {"basic": 892,
"percentile": 890}
@pytest.mark.parametrize("method, expected",
tests_against_itself_2samp.items())
def test_bootstrap_against_itself_2samp(method, expected):
# The expected values in this test were generated using bootstrap
# to check for unintended changes in behavior. The test also makes sure
# that bootstrap works with multi-sample statistics and that the
# `axis` argument works as expected / function is vectorized.
np.random.seed(0)
n1 = 100 # size of sample 1
n2 = 120 # size of sample 2
n_resamples = 999 # number of bootstrap resamples used to form each CI
confidence_level = 0.9
# The statistic we're interested in is the difference in means
def my_stat(data1, data2, axis=-1):
mean1 = np.mean(data1, axis=axis)
mean2 = np.mean(data2, axis=axis)
return mean1 - mean2
# The true difference in the means is -0.1
dist1 = stats.norm(loc=0, scale=1)
dist2 = stats.norm(loc=0.1, scale=1)
stat_true = dist1.mean() - dist2.mean()
# Do the same thing 1000 times. (The code is fully vectorized.)
n_replications = 1000
data1 = dist1.rvs(size=(n_replications, n1))
data2 = dist2.rvs(size=(n_replications, n2))
res = bootstrap((data1, data2),
statistic=my_stat,
confidence_level=confidence_level,
n_resamples=n_resamples,
batch=50,
method=method,
axis=-1)
ci = res.confidence_interval
# ci contains vectors of lower and upper confidence interval bounds
ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
assert ci_contains_true == expected
# ci_contains_true is not inconsistent with confidence_level
pvalue = stats.binomtest(ci_contains_true, n_replications,
confidence_level).pvalue
assert pvalue > 0.1
@pytest.mark.parametrize("method", ["basic", "percentile"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_3samp(method, axis):
def statistic(*data, axis=0):
# an arbitrary, vectorized statistic
return sum((sample.mean(axis) for sample in data))
def statistic_1d(*data):
# the same statistic, not vectorized
for sample in data:
assert sample.ndim == 1
return statistic(*data, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
y = np.random.rand(4, 5)
z = np.random.rand(4, 5)
res1 = bootstrap((x, y, z), statistic, vectorized=True,
axis=axis, n_resamples=100, method=method, random_state=0)
res2 = bootstrap((x, y, z), statistic_1d, vectorized=False,
axis=axis, n_resamples=100, method=method, random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
@pytest.mark.parametrize("axis", [0, 1])
def test_bootstrap_vectorized_1samp(method, axis):
def statistic(x, axis=0):
# an arbitrary, vectorized statistic
return x.mean(axis=axis)
def statistic_1d(x):
# the same statistic, not vectorized
assert x.ndim == 1
return statistic(x, axis=0)
np.random.seed(0)
x = np.random.rand(4, 5)
res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
n_resamples=100, batch=None, method=method,
random_state=0)
res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
n_resamples=100, batch=10, method=method,
random_state=0)
assert_allclose(res1.confidence_interval, res2.confidence_interval)
assert_allclose(res1.standard_error, res2.standard_error)
@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
def test_bootstrap_degenerate(method):
data = 35 * [10000.]
if method == "BCa":
with np.errstate(invalid='ignore'):
with pytest.warns(BootstrapDegenerateDistributionWarning):
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (np.nan, np.nan))
else:
res = bootstrap([data, ], np.mean, method=method)
assert_equal(res.confidence_interval, (10000., 10000.))
| assert_equal(res.standard_error, 0) | numpy.testing.assert_equal |
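# For orientation: the percentile bootstrap exercised by these tests can be written by
# hand in a few lines. This is only a sketch of the idea, not the scipy implementation:
import numpy as np
rng = np.random.default_rng(0)
sample = rng.normal(loc=5, scale=2, size=100)
boot_means = np.array([rng.choice(sample, size=sample.size, replace=True).mean()
                       for _ in range(999)])
ci_low, ci_high = np.percentile(boot_means, [2.5, 97.5])  # 95% percentile CI for the mean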
import os
import datetime
import numpy as np
from moviepy.video.VideoClip import VideoClip
from moviepy.video.fx.resize import resize
VideoClip.resize = resize
import mahotas
from uuid import uuid4
from definitions import TmpDir
def animateImagesFromResults(imglist, datetimelist, mask, settings, logger, temporalmode, temporalrange, temporalthreshold, replaceimages, varstoplot, barwidth, barlocation, duration, fps, resolution,fformat, resdata):
if len(imglist) == 0:
return False
if mask is not None:
mask, pgs, th = mask
(duration,fps,resolution,barwidth) = map(float,(duration,fps,resolution[:-1],barwidth))
barwidth = barwidth/100.0
resolution = int(resolution)
temporalthreshold = datetime.timedelta(hours=float(temporalthreshold))
logger.set('Generating animation...')
res_captions = []
res_data = []
for i,v in enumerate(resdata):
if i % 2 == 0:
res_captions.append(v)
else:
res_data.append(v)
resdata = None
# if temporalmode == 'Date interval':
if True:
sdate = min([datetime.datetime.strptime(temporalrange[0],'%d.%m.%Y'),datetime.datetime.strptime(temporalrange[1],'%d.%m.%Y')])
edate = max([datetime.datetime.strptime(temporalrange[0],'%d.%m.%Y'),datetime.datetime.strptime(temporalrange[1],'%d.%m.%Y')])
logger.set('Number of images:'+str(np.sum((np.array(datetimelist)<=edate)*(np.array(datetimelist)>=sdate))))
if fps == 0:
fps = np.sum((np.array(datetimelist)<=edate)*(np.array(datetimelist)>=sdate))/duration
if fps < 1:
fps = 1.0
else: #range in data
sdate = min(res_data[res_captions.index('Time')])
edate = max(res_data[res_captions.index('Time')])
logger.set('Number of images:'+str(len(imglist)))
if fps == 0:
fps = len(datetimelist)/duration
if fps < 1:
fps = 1.0
logger.set('Animation duration: '+str(datetime.timedelta(seconds=duration)))
logger.set('Frames per second: '+str(fps))
logger.set('Number of frames: '+str(fps*duration))
logger.set('Resolution '+str(resolution)+'p')
logger.set('Format: '+str(fformat))
dateratio = (edate-sdate).total_seconds()/float(duration)
animfname = str(uuid4())+'.'+fformat.lower()
while os.path.isfile(os.path.join(TmpDir,animfname)):
animfname = str(uuid4())+'.'+fformat.lower()
animfname = os.path.join(TmpDir,animfname)
datetimelist = np.array(datetimelist)
range_total_secs = abs(edate-sdate).total_seconds()
for i,v in enumerate(varstoplot):
if v[1] != 'Time':
if v[4] == '':
varstoplot[i][4] = np.nanmin(res_data[res_captions.index(v[1])])
else:
varstoplot[i][4] = float(v[4])
if v[5] == '':
varstoplot[i][5] = np.nanmax(res_data[res_captions.index(v[1])])
else:
varstoplot[i][5] = float(v[5])
def make_frame(t):
res_date = res_data[res_captions.index('Time')][np.argmin(np.abs(res_data[res_captions.index('Time')]-sdate-datetime.timedelta(seconds=dateratio*t)))]
if abs(res_date-sdate-datetime.timedelta(seconds=dateratio*t)) > temporalthreshold:
img_file = False
else:
if res_date in datetimelist:
img_date = res_date
img_file = imglist[datetimelist.tolist().index(img_date)]
try:
img = mahotas.imread(img_file)
except:
img_file = False
if res_date not in datetimelist or img_file is False: #'Closest image','Blank (Black)','Blank (White)','Monochromatic Noise'
if replaceimages == 'Closest image': #xxcheck later again
img_date = datetimelist[np.argmin(np.abs(datetimelist-res_date))]
img_file = imglist[np.argmin(np.abs(datetimelist-res_date))]
img = mahotas.imread(img_file)
else:
img_date = res_date
if replaceimages == 'Blank (Black)':
img = mahotas.imread(imglist[0])*0
if replaceimages == 'Blank (White)':
img = mahotas.imread(imglist[0])*0+255
if replaceimages == 'Monochromatic Noise':
img = (np.random.rand(*mahotas.imread(imglist[0]).shape[:2])*255).astype('uint8')
img = np.dstack((img,img,img))
vid_date = sdate+datetime.timedelta(seconds=dateratio*t)
res_toy = abs(datetime.datetime(res_date.year,1,1,0,0,0)-res_date).total_seconds()/float(abs(datetime.datetime(res_date.year,12,31,23,59,59)-datetime.datetime(res_date.year,1,1,0,0,0)).total_seconds())
        if img_file is False:
res_toy = 0.0
vid_toy = datetime.timedelta(seconds=dateratio*t).total_seconds()/float(range_total_secs)
if barlocation == 'Right' or barlocation == 'Left':
barshape = (img.shape[0],int(round(img.shape[1]*barwidth)))
for v in varstoplot:
if bool(int(v[0])):
barframe = | np.zeros(barshape,dtype='uint8') | numpy.zeros |
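# For context: the excerpt stops inside make_frame(t). A frame callback like this is
# typically wired into moviepy as sketched below (names reuse variables defined above;
# the actual call in this repo lies outside the excerpt, so treat this as illustrative):
#   clip = VideoClip(make_frame, duration=duration)  # make_frame(t) must return an HxWx3 uint8 frame
#   clip = clip.resize(height=resolution)            # uses the patched VideoClip.resize from the imports
#   clip.write_videofile(animfname, fps=fps)         # or clip.write_gif(animfname, fps=fps) for GIF output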
import random
import numpy as np
import enum
from ..utilities.transformations import *
from ..utilities.geometry import steer
from ..mechanics.mechanics import *
from ..mechanics.stability_margin import *
from .tree import RRTTree, RRTEdge
import time
import scipy.optimize
def qlcp_solver(c,M1,M2,G,h,A,b,nvar,lb,ub):
b = b.flatten()
cons = ({'type': 'ineq', 'fun': lambda z: np.dot(M2,z)*np.dot(M1,z)},
{'type': 'ineq', 'fun': lambda z: np.dot(G,z)+h },
{'type': 'ineq', 'fun': lambda z: np.dot(M1, z)},
{'type': 'eq', 'fun': lambda z: np.dot(A,z)+b })
fun = lambda z: np.dot(z[0:3],z[0:3]) - 2*np.dot(c,z[0:3])
jac = lambda z: np.concatenate((2*(z[0:3]-c), np.zeros(nvar-3)))
bnds = scipy.optimize.Bounds(lb=lb,ub=ub)
res = scipy.optimize.minimize(fun, np.zeros(nvar), method='SLSQP', jac=jac, bounds=bnds, constraints=cons)
return res
def qlcp(x, v, mnp, env, object_weight, mnp_mu=0.8, env_mu=0.3, mnp_fn_max=None):
x = np.array(x)
# Get variable sizes.
n_m = len(mnp)
n_e = len(env)
n_c = n_m + n_e
n_var = 3 + 4*n_e + 2*n_m
# Make contact info.
Ad_gcos, depths, mus = contact_info(mnp, env, mnp_mu, env_mu)
depths = [0.0]*n_c
# object gravity
f_g = np.array([[0.0],[object_weight],[0.0]])
g_ow = np.linalg.inv(twist_to_transform(x))
f_o = np.dot(g_ow, f_g)
n = np.array([[0.0],[1.0],[0.0]])
D = np.array([[1.0, -1.0],[0.0, 0.0],[0.0, 0.0]])
# Gx >= h
G = np.zeros((n_c,n_var))
h = np.zeros(n_c)
# Ax = b
A = np.zeros((3, n_var))
b = f_o
M1 = np.zeros((3*n_e,n_var))
M2 = np.zeros((3*n_e,n_var))
i_var = 3
for i in range(n_c):
N_c = np.dot(Ad_gcos[i].T, n)
T_c = np.dot(Ad_gcos[i].T, D)
nAd_gco = np.dot(n.T, Ad_gcos[i]).flatten()
TAd_gco = np.dot(D.T, Ad_gcos[i])
if i >= n_e:
mu = mnp_mu
A[:, i_var:i_var + 2] = np.hstack((N_c, T_c[:,0].reshape((-1,1))))
G[i, i_var:i_var + 2] = np.array([mu, -1])
i_var += 2
else:
mu = env_mu
A[:, i_var:i_var + 3] = np.hstack((N_c, T_c))
G[i, i_var:i_var + 3] = np.array([mu, -1, -1])
M1[i*3,0:3] = nAd_gco
M1[i*3+1:i*3+3, 0:3] = TAd_gco
M1[i*3+1:i*3+3,6+i*4] = 1
M2[i*3:(i+1)*3, 3+i*4:3+i*4+4] = 1
i_var += 4
lb = np.zeros(n_var)
lb[0:3] = -np.inf
    lb[3+4*n_e + np.arange(1,2*n_m,2)] = -np.inf
ub = np.full(n_var, np.inf)
if mnp_fn_max is not None:
ub[3+4*n_e + np.arange(0,2*n_m,2)] = mnp_fn_max
res = qlcp_solver(v,M1,M2,G,h,A,b,n_var,lb,ub)
if res.success:
vx = res.x[0:3]
else:
vx = np.zeros(3)
return vx
class Status(enum.Enum):
FAILED = 1
TRAPPED = 2
ADVANCED = 3
REACHED = 4
def smallfmod(x, y):
while x > y:
x -= y
while x < 0:
x += y
return x
def get_both_velocities(x_rand, x_near):
x_rand = np.array(x_rand)
x_rand_ = x_rand[:]
x = np.array(x_near)
g_v = np.identity(3)
g_v[0:2, 0:2] = config2trans(x)[0:2, 0:2]
v_star = np.dot(g_v.T, x_rand - np.array(x))
if v_star[2] > 0:
x_rand_[2] = x_rand[2] - 2 * np.pi
else:
x_rand_[2] = x_rand[2] + 2 * np.pi
v_star_ = np.dot(g_v.T, x_rand_ - x)
return v_star, v_star_
class RRT1(object):
def __init__(self, X, x_init, x_goal, envir, object, manipulator, max_samples, r=5, world='planar'):
"""
Template RRTKinodynamic Planner
"""
self.X = X
self.samples_taken = 0
self.max_samples = max_samples
self.x_init = x_init
self.x_goal = x_goal
self.neighbor_radius = r
self.world = world
if self.world == 'vert':
self.object_weight = 10
else:
self.object_weight = 0
self.trees = [] # list of all trees
self.add_tree() # add initial tree
self.environment = envir
self.object = object
self.manipulator = manipulator
self.collision_manager = []
self.mnp_mu = 0.8
self.env_mu = 0.3
self.dist_weight = 1
self.goal_kch = [1, 1, 1]
self.cost_weight = [0.2, 1, 1]
self.step_length = 2
self.mnp_fn_max = None
def add_tree(self):
"""
Create an empty tree and add to trees
"""
self.trees.append(RRTTree())
def set_world(self, key):
self.world = key
def dist(self, p, q):
cx = (p[0] - q[0]) ** 2
cy = (p[1] - q[1]) ** 2
period = 2 * np.pi
t1 = smallfmod(p[2], period)
t2 = smallfmod(q[2], period)
dt = t2 - t1
dt = smallfmod(dt + period / 2.0, period) - period / 2.0
ct = self.dist_weight * dt ** 2
return cx + cy + ct
def goal_dist(self, p):
q = self.x_goal
cx = (p[0] - q[0]) ** 2
cy = (p[1] - q[1]) ** 2
period = 2 * np.pi
t1 = smallfmod(p[2], period)
t2 = smallfmod(q[2], period)
dt = t2 - t1
dt = smallfmod(dt + period / 2.0, period) - period / 2.0
ct = dt ** 2
dist = self.goal_kch[0] * cx + self.goal_kch[1] * cy + self.goal_kch[2] * ct
return dist
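    # Worked example of the angular wrap used in dist() and goal_dist(): with
    # p[2] = 0.1 and q[2] = 2*pi - 0.1 the raw difference is about 6.08 rad, but
    # smallfmod(dt + pi, 2*pi) - pi maps it to -0.2 rad, so ct = 0.04 instead of ~37.
    # Configurations that differ by (nearly) a full revolution therefore stay close.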
def get_nearest(self, tree, x):
"""
Return vertex nearest to x
:param tree: int, tree being searched
:param x: tuple, vertex around which searching
:return: tuple, nearest vertex to x
"""
min_d = np.inf
for q in self.trees[tree].nodes:
d = self.dist(q, x)
if q in self.trees[tree].edges:
if min_d > d:
min_d = d
q_near = q
else:
self.trees[tree].nodes.remove(q)
return q_near
def get_unexpand_nearest(self, tree):
"""
        Return the not-yet-goal-expanded vertex closest to the goal
        :param tree: int, tree being searched
        :return: tuple, unexpanded vertex nearest to the goal
"""
min_d = np.inf
q_near = self.x_init
for q in self.trees[tree].nodes:
if self.trees[tree].goal_expand[q]:
continue
d = self.goal_dist(q)
if q in self.trees[tree].edges:
if min_d > d:
min_d = d
q_near = q
else:
self.trees[tree].nodes.remove(q)
return q_near
def get_goal_nearest(self, tree):
"""
        Return the vertex closest to the goal and its goal distance
        :param tree: int, tree being searched
        :return: (tuple, float), vertex nearest to the goal and its distance to the goal
"""
min_d = np.inf
for q in self.trees[tree].nodes:
d = self.goal_dist(q)
if q in self.trees[tree].edges:
if min_d > d:
min_d = d
q_near = q
else:
self.trees[tree].nodes.remove(q)
return q_near, min_d
def reconstruct_path(self, tree, x_init, x_goal):
"""
Reconstruct path from start to goal
:param tree: int, tree in which to find path
:param x_init: tuple, starting vertex
:param x_goal: tuple, ending vertex
:return: sequence of vertices from start to goal
"""
n_nodes = 2
path = [x_goal]
current = x_goal
mnp_path = [None]
key_path = []
key_mnp_path = []
if x_init == x_goal:
return path, mnp_path
while not self.trees[tree].edges[current].parent == x_init:
# path.append(self.trees[tree].E[current])
n_nodes += 1
key_path.append(self.trees[tree].edges[current].parent)
key_mnp_path.append(self.trees[tree].edges[current].manip)
current_path = self.trees[tree].edges[current].path
path += current_path
mnp_path += [self.trees[tree].edges[current].manip] * len(current_path)
current = self.trees[tree].edges[current].parent
print(current)
current_path = self.trees[tree].edges[current].path
path += current_path
mnp_path += [self.trees[tree].edges[current].manip] * len(current_path)
key_path.append(self.trees[tree].edges[current].parent)
key_mnp_path.append(self.trees[tree].edges[current].manip)
path.append(x_init)
mnp_path.append(None)
path.reverse()
mnp_path.reverse()
print('number of nodes', n_nodes)
return path, mnp_path, key_path, key_mnp_path
def add_waypoints_to_tree(self, tree, edge):
parent = edge.parent
path = edge.path[:]
path.reverse()
mode = edge.mode
mnps = edge.manip
d_i = int(len(path) / 3) + 1
# print(len(path))
i = d_i
while i < len(path):
x_new = path[i]
path_i = path[0:i + 1]
path_i.reverse()
_, envs = self.check_collision(x_new)
edge_ = RRTEdge(parent, mnps, envs, path_i, mode)
self.trees[tree].add(x_new, edge_)
i += d_i
def add_collision_manager(self, collision_manager, object, object_shape):
self.collision_manager = collision_manager
self.object = object
self.object_shape = object_shape
def check_collision(self, x):
if_collide, w_contacts = self.environment.check_collision(self.object, x)
contacts = self.object.contacts2objframe(w_contacts, x)
return if_collide, contacts
def check_penetration(self, contacts):
ifPenetrate = False
for c in contacts:
if c.d < -0.05:
ifPenetrate = True
# print('penetrate')
break
return ifPenetrate
def contact_modes(self, x, envs):
# TODO: number of manipulator contacts should change according to mnp types
# _, envs = self.check_collision(x)
modes = get_contact_modes([Contact([], [], None)] * self.manipulator.npts, envs)
return modes
# @profile
def resample_manipulator_contacts(self, tree, x):
# mnp = self.object.sample_contacts(1)
# ifReturn = True
# mnp_config = None
pre_mnp = self.trees[tree].edges[x].manip
num_manip = self.manipulator.npts
ifReturn = False
mnp_config = None
if pre_mnp is None:
while not ifReturn:
mnp = self.object.sample_contacts(num_manip)
isReachable, mnp_config = self.manipulator.inverse_kinematics(mnp)
# ifCollide, _ = self.environment.check_collision(self.manipulator, mnp_config)
ifCollide = self.manipulator.if_collide_w_env(self.environment, mnp_config, x)
ifReturn = isReachable and (not ifCollide)
return ifReturn, mnp, mnp_config
else:
counter = 0
max_count = 4
# ifReturn = False
while counter < max_count:
counter += 1
mnp = np.array([None] * num_manip)
# random find contacts that change
num_manip_left = random.randint(0, num_manip - 1)
manip_left = random.sample(range(num_manip), num_manip_left)
# check if equilibrium if the selected manip contacts are moved
if static_equilibrium(x, np.array(pre_mnp)[manip_left], self.trees[tree].edges[x].env, self.world,
self.mnp_mu, self.env_mu, self.mnp_fn_max):
# randomly sample manipulator contacts
mnp[manip_left] = np.array(pre_mnp)[manip_left]
for i in range(len(mnp)):
if mnp[i] is None:
mnp[i] = self.object.sample_contacts(1)[0]
# check inverse kinematics
isReachable, mnp_config = self.manipulator.inverse_kinematics(mnp)
if isReachable:
ifCollide = self.manipulator.if_collide_w_env(self.environment, mnp_config, x)
# if (mnp_config[3] < 0 or mnp_config[1] < 0) and not ifCollide:
# print('sampled!',x)
if not ifCollide:
ifReturn = True
break
if ifReturn and mnp[0] is None:
print('sth wrong with resample manipulator contacts')
return ifReturn, mnp, mnp_config
# @profile
def best_mnp_location(self, tree, x_near, x_rand, vel):
n_sample = 5
g_v = np.identity(3)
g_v[0:2, 0:2] = config2trans(x_near)[0:2, 0:2]
v_star = np.dot(g_v.T, | np.array(x_rand) | numpy.array |
import numpy as np
import copy
from sklearn.preprocessing import normalize
import BasicRegression as BR
import numexpr as ne
import Kernel
class PLSR(BR.BasicRegression):
def __init__(self, n_components = None):
self.n_components = n_components
pass
def fit(self, X, Y, d = None, max_iter = 500, eps = 1e-6):
""" Fitting the data with Principal Least Squares Regression
N: the number of data
Mx: the number of variable in X
My: the number of variable in Y
X : N x Mx nparray
Y : N * My nparray
max_iter: the maximum number of iteratons
eps: the maximum tolerant of diff. between iterations
"""
X_norm, Y_norm = self.preprocessing(X, Y)
        if d is None:
            d = self.n_components
        if d is None:
d = | np.min((self.Mx, self.N)) | numpy.min |
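# For orientation: PLSR.fit is truncated in this excerpt. The classic NIPALS update that
# such a fit typically iterates per component (using max_iter and eps as above) looks
# roughly like the sketch below; the actual implementation may differ in deflation and
# scaling details, and `nipals_component` is an illustrative name, not part of the repo.
import numpy as np
def nipals_component(X, Y, max_iter=500, eps=1e-6):
    u = Y[:, [0]]                                # initial Y-score vector
    for _ in range(max_iter):
        w = X.T @ u; w /= np.linalg.norm(w)      # X-weights
        t = X @ w                                # X-scores
        q = Y.T @ t; q /= np.linalg.norm(q)      # Y-weights
        u_new = Y @ q                            # updated Y-scores
        converged = np.linalg.norm(u_new - u) < eps
        u = u_new
        if converged:
            break
    return w, t, q, u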
# Author: <NAME>
# Contributors: <NAME>, <NAME>
import numpy as np
import torch
from nose.tools import raises
from cgnet.feature.utils import (GaussianRBF, PolynomialCutoffRBF,
ShiftedSoftplus, _AbstractRBFLayer)
from cgnet.feature.statistics import GeometryStatistics
from cgnet.feature.feature import GeometryFeature, Geometry
# Define sizes for a pseudo-dataset
frames = np.random.randint(10, 30)
beads = np.random.randint(5, 10)
g = Geometry(method='torch')
@raises(NotImplementedError)
def test_radial_basis_function_len():
# Make sure that a NotImplementedError is raised if an RBF layer
# does not have a __len__() method
# Here, we use the _AbstractRBFLayer base class as our RBF
abstract_RBF = _AbstractRBFLayer()
# Next, we check to see if the NotImplementedError is raised
# This is done using the decorator above, because we cannot
# use nose.tools.assert_raises directly on special methods
len(abstract_RBF)
def test_radial_basis_function():
# Make sure radial basis functions are consistent with manual calculation
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance)
gauss_layer = rbf.forward(distances)
# Manually calculate expansion with numpy
# according to the following formula:
# e_k (r_j - r_i) = exp(- \gamma (\left \| r_j - r_i \right \| - \mu_k)^2)
# with centers mu_k calculated on a uniform grid between
# zero and the distance cutoff and gamma as a scaling parameter.
centers = np.linspace(low_cutoff, high_cutoff,
n_gaussians).astype(np.float64)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = np.exp(gamma * magnitude_squared)
# Shapes and values need to be the same
np.testing.assert_equal(centers.shape, rbf.centers.shape)
np.testing.assert_allclose(gauss_layer.numpy(), gauss_manual, rtol=1e-5)
def test_radial_basis_function_distance_masking():
# Makes sure that if a distance mask is used, the corresponding
# expanded distances returned by GaussianRBF are zero
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
n_gaussians = np.random.randint(5, 10)
neighbor_cutoff = np.abs(np.random.rand())
neighbors, neighbor_mask = g.get_neighbors(distances,
cutoff=neighbor_cutoff)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance)
gauss_layer = rbf.forward(distances, distance_mask=neighbor_mask)
# Lastly, we check to see that the application of the mask is correct
# against a manual calculation and masking
centers = np.linspace(low_cutoff, high_cutoff, n_gaussians)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = torch.tensor(np.exp(gamma * magnitude_squared))
gauss_manual = gauss_manual * neighbor_mask[:, :, :, None].double()
np.testing.assert_array_almost_equal(gauss_layer.numpy(),
gauss_manual.numpy())
def test_radial_basis_function_normalize():
# Tests to make sure that the output of GaussianRBF is properly
# normalized if 'normalize_output' is specified as True
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance,
normalize_output=True)
gauss_layer = rbf.forward(distances)
# Manually calculate expansion with numpy
# according to the following formula:
# e_k (r_j - r_i) = exp(- \gamma (\left \| r_j - r_i \right \| - \mu_k)^2)
# with centers mu_k calculated on a uniform grid between
# zero and the distance cutoff and gamma as a scaling parameter.
centers = np.linspace(low_cutoff, high_cutoff,
n_gaussians).astype(np.float64)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = np.exp(gamma * magnitude_squared)
# manual output normalization
gauss_manual = gauss_manual / np.sum(gauss_manual, axis=3)[:, :, :, None]
# Shapes and values need to be the same
np.testing.assert_equal(centers.shape, rbf.centers.shape)
np.testing.assert_allclose(gauss_layer.numpy(), gauss_manual, rtol=1e-5)
def test_polynomial_cutoff_rbf():
# Make sure the polynomial_cutoff radial basis functions are consistent with
# manual calculations
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = np.random.randn(frames, beads, beads - 1).astype(np.float64)
# Define random parameters for the polynomial_cutoff RBF
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
alpha = np.random.uniform(0.1, 1.0)
# Calculate Gaussian expansion using the implemented layer
polynomial_cutoff_rbf = PolynomialCutoffRBF(high_cutoff=high_cutoff,
low_cutoff=low_cutoff,
n_gaussians=n_gaussians,
alpha=alpha,
tolerance=1e-8)
polynomial_cutoff_rbf_layer = polynomial_cutoff_rbf.forward(
torch.tensor(distances))
# Manually calculate expansion with numpy
# First, we compute the centers and the scaling factors
centers = np.linspace(np.exp(-high_cutoff), np.exp(-low_cutoff),
n_gaussians).astype(np.float64)
beta = np.power(((2/n_gaussians) * (1-np.exp(-high_cutoff))), -2)
# Next, we compute the gaussian portion
exp_distances = np.exp(-alpha * np.expand_dims(distances, axis=3))
magnitude_squared = np.power(exp_distances - centers, 2)
gauss_manual = np.exp(-beta * magnitude_squared)
# Next, we compute the polynomial modulation
zeros = np.zeros_like(distances)
modulation = np.where(distances < high_cutoff,
1 - 6.0 * np.power((distances/high_cutoff), 5)
+ 15.0 * np.power((distances/high_cutoff), 4)
- 10.0 * np.power((distances/high_cutoff), 3),
zeros)
modulation = np.expand_dims(modulation, axis=3)
polynomial_cutoff_rbf_manual = modulation * gauss_manual
# Map tiny values to zero
polynomial_cutoff_rbf_manual = np.where(
np.abs(polynomial_cutoff_rbf_manual) > polynomial_cutoff_rbf.tolerance,
polynomial_cutoff_rbf_manual,
np.zeros_like(polynomial_cutoff_rbf_manual)
)
# centers and output values need to be the same
np.testing.assert_allclose(centers,
polynomial_cutoff_rbf.centers, rtol=1e-5)
np.testing.assert_allclose(polynomial_cutoff_rbf_layer.numpy(),
polynomial_cutoff_rbf_manual, rtol=1e-5)
def test_polynomial_cutoff_rbf_distance_masking():
# Makes sure that if a distance mask is used, the corresponding
# expanded distances returned by PolynomialCutoffRBF are zero
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
alpha = np.random.uniform(0.1, 1.0)
neighbor_cutoff = np.abs(np.random.rand())
neighbors, neighbor_mask = g.get_neighbors(distances,
cutoff=neighbor_cutoff)
# Calculate Gaussian expansion using the implemented layer
polynomial_cutoff_rbf = PolynomialCutoffRBF(high_cutoff=high_cutoff,
low_cutoff=low_cutoff,
n_gaussians=n_gaussians,
alpha=alpha,
tolerance=1e-8)
polynomial_cutoff_rbf_layer = polynomial_cutoff_rbf.forward(
torch.tensor(distances),
distance_mask=neighbor_mask)
# Manually calculate expansion with numpy
# First, we compute the centers and the scaling factors
centers = np.linspace(np.exp(-high_cutoff), np.exp(-low_cutoff),
n_gaussians).astype(np.float64)
beta = np.power(((2/n_gaussians) * (1-np.exp(-high_cutoff))), -2)
# Next, we compute the gaussian portion
exp_distances = np.exp(-alpha * np.expand_dims(distances, axis=3))
magnitude_squared = np.power(exp_distances - centers, 2)
gauss_manual = np.exp(-beta * magnitude_squared)
# Next, we compute the polynomial modulation
zeros = np.zeros_like(distances)
modulation = np.where(distances < high_cutoff,
1 - 6.0 * np.power((distances/high_cutoff), 5)
+ 15.0 * np.power((distances/high_cutoff), 4)
- 10.0 * np.power((distances/high_cutoff), 3),
zeros)
modulation = np.expand_dims(modulation, axis=3)
polynomial_cutoff_rbf_manual = modulation * gauss_manual
# Map tiny values to zero
polynomial_cutoff_rbf_manual = np.where(
np.abs(polynomial_cutoff_rbf_manual) > polynomial_cutoff_rbf.tolerance,
polynomial_cutoff_rbf_manual,
np.zeros_like(polynomial_cutoff_rbf_manual)
)
polynomial_cutoff_rbf_manual = torch.tensor(
polynomial_cutoff_rbf_manual) * neighbor_mask[:, :, :, None].double()
np.testing.assert_array_almost_equal(polynomial_cutoff_rbf_layer.numpy(),
polynomial_cutoff_rbf_manual.numpy())
def test_polynomial_cutoff_rbf_normalize():
# Tests to make sure that the output of PolynomialCutoffRBF is properly
# normalized if 'normalize_output' is specified as True
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = np.random.randn(frames, beads, beads - 1).astype(np.float64)
# Define random parameters for the polynomial_cutoff RBF
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
alpha = np.random.uniform(0.1, 1.0)
# Calculate Gaussian expansion using the implemented layer
polynomial_cutoff_rbf = PolynomialCutoffRBF(high_cutoff=high_cutoff,
low_cutoff=low_cutoff,
n_gaussians=n_gaussians,
alpha=alpha,
normalize_output=True,
tolerance=1e-8)
polynomial_cutoff_rbf_layer = polynomial_cutoff_rbf.forward(
torch.tensor(distances))
# Manually calculate expansion with numpy
# First, we compute the centers and the scaling factors
centers = np.linspace(np.exp(-high_cutoff), np.exp(-low_cutoff),
n_gaussians).astype(np.float64)
beta = np.power(((2/n_gaussians) * (1-np.exp(-high_cutoff))), -2)
# Next, we compute the gaussian portion
exp_distances = np.exp(-alpha * | np.expand_dims(distances, axis=3) | numpy.expand_dims |
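# For orientation: the Gaussian expansion these tests verify reduces, in plain numpy, to
# the few lines below (same formula as the manual check above; shapes follow the
# (frames, beads, neighbors) convention, and the function name is illustrative only):
import numpy as np
def gaussian_rbf_expand(distances, low_cutoff, high_cutoff, n_gaussians, variance):
    centers = np.linspace(low_cutoff, high_cutoff, n_gaussians)
    gamma = -0.5 / variance
    return np.exp(gamma * (distances[..., None] - centers) ** 2)  # (..., n_gaussians)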
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from darkgreybox.models import Ti, TiTe, TiTeTh, TiTeThRia, TiTh
class TiTest(unittest.TestCase):
def test__model(self):
params = {
'Ti0': {'value': 10},
'Ria': {'value': 4},
'Ci': {'value': 0.25},
}
X = {
'Ta': np.array([10, 10, 10]),
'Ph': np.array([10, 0, 0]),
}
m = Ti(params=params, rec_duration=1)
actual_result = m.model(m.params, X)
assert_array_equal(np.array([10, 50, 10]), actual_result.var['Ti'])
assert_array_equal(actual_result.Z, actual_result.var['Ti'])
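    # The expected series [10, 50, 10] is consistent with a single-node RC update of the
    # form Ti[k+1] = Ti[k] + (dt/Ci) * ((Ta[k] - Ti[k]) / Ria + Ph[k]) with dt = 1, so dt/Ci = 4:
    #   step 1: 10 + 4 * ((10 - 10)/4 + 10) = 50
    #   step 2: 50 + 4 * ((10 - 50)/4 +  0) = 10
    # (The exact discretisation used by the Ti model is inferred from these numbers, not
    # taken from its source, so treat this as a plausibility check rather than the spec.)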
def test__fit(self):
y = np.array([10, 10, 20])
params = {
'Ti0': {'value': 10},
'Ria': {'value': 1},
'Ci': {'value': 1},
}
X = {
'Ta': np.array([10, 10, 10]),
'Ph': np.array([0, 10, 0]),
}
m = Ti(params=params, rec_duration=1) \
.fit(X=X, y=y, method='nelder')
for k, v in params.items():
self.assertAlmostEqual(v['value'], m.result.params[k].value, places=3)
assert_array_equal(y, m.model(m.result.params, X).Z)
class TiThTest(unittest.TestCase):
def test__model(self):
params = {
'Ti0': {'value': 10},
'Th0': {'value': 20},
'Rih': {'value': 2},
'Ria': {'value': 4},
'Ci': {'value': 0.25},
'Ch': {'value': 0.5}
}
X = {
'Ta': np.array([10, 10, 10]),
'Ph': | np.array([10, 0, 0]) | numpy.array |
import copy
import time
import psutil
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable, grad
from HyperSphere.BO.acquisition.acquisition_functions import expected_improvement
from HyperSphere.BO.utils.sobol import sobol_generate
N_SPREAD = 20000 # Number of sobol sequence points as candidates for good initial points
N_SPRAY = 10 # Number of random perturbations of current optimum
N_INIT = 20 # Number of initial points for acquisition function maximization
N_AVAILABLE_CORE = 8 # When there is this many available cpu cores new optimization is started
MAX_OPTIMIZATION_STEP = 500
def suggest(x0, reference, inferences, acquisition_function=expected_improvement, bounds=None, pool=None):
max_step = MAX_OPTIMIZATION_STEP
n_init = x0.size(0)
start_time = time.time()
    print(('Acquisition function optimization with %2d inits %s has begun' % (n_init, time.strftime('%H:%M:%S', time.gmtime(start_time)))))
# Parallel version and non-parallel version behave differently.
if pool is not None:
# pool = torch.multiprocessing.Pool(n_init) if parallel else None
# results = [pool.apply_async(optimize, args=(max_step, x0[p], reference, inferences, acquisition_function, bounds)) for p in range(n_init)]
results = []
process_started = [False] * n_init
process_running = [False] * n_init
process_index = 0
while process_started.count(False) > 0:
cpu_usage = psutil.cpu_percent(0.2)
run_more = (100.0 - cpu_usage) * float(psutil.cpu_count()) > 100.0 * N_AVAILABLE_CORE
if run_more:
# RuntimeError: Cowardly refusing to serialize non-leaf tensor which requires_grad,
# since autograd does not support crossing process boundaries. If you just want to
# transfer the data, call detach() on the tensor before serializing (e.g., putting it on the queue).
curr_results = pool.apply_async(optimize, args=(max_step, x0[process_index], reference, inferences, acquisition_function, bounds,))
results.append(curr_results)
process_started[process_index] = True
process_running[process_index] = True
process_index += 1
while [not res.ready() for res in results].count(True) > 0:
time.sleep(1)
for res in results:
print(type(res.get()))
return_values = [res.get() for res in results]
local_optima, optima_value = list(zip(*return_values))
else:
local_optima = []
optima_value = []
for p in range(n_init):
optimum_loc, optimum_value = optimize(max_step, x0[p], reference, inferences, acquisition_function, bounds)
local_optima.append(optimum_loc)
optima_value.append(optimum_value)
end_time = time.time()
    print(('Acquisition function optimization ended %s(%s)' % (time.strftime('%H:%M:%S', time.gmtime(end_time)), time.strftime('%H:%M:%S', time.gmtime(end_time - start_time)))))
suggestion = local_optima[np.nanargmin(optima_value)]
mean, std, var, stdmax, varmax = mean_std_var(suggestion, inferences)
return suggestion, mean, std, var, stdmax, varmax
def optimize(max_step, x0, reference, inferences, acquisition_function=expected_improvement, bounds=None):
if bounds is not None:
if not hasattr(bounds, '__call__'):
def out_of_bounds(x_input):
return (x_input.data < bounds[0]).any() or (x_input.data > bounds[1]).any()
else:
out_of_bounds = bounds
#x = Variable(x0.clone().view(1, -1), requires_grad=True)
x = x0.clone().view(1, -1)
x.requires_grad_()
prev_loss = None
###--------------------------------------------------###
# This block can be modified to use other optimization method
optimizer = optim.Adam([x], lr=0.01)
for s in range(max_step):
prev_x = x.clone()
optimizer.zero_grad()
loss = -acquisition(x, reference=reference, inferences=inferences, acquisition_function=acquisition_function, in_optimization=True)
curr_loss = loss.squeeze(0)
#grad_tmp = grad([tmp_y], [tmp_x], retain_graph=True, allow_unused=True)
#grad_tmp = tmp_y.backward()
if torch.isnan(loss).any():
raise ValueError('Loss contains NaN', loss)
if torch.isnan(x).any():
raise ValueError('x contains NaN', x)
# grad_tmp = grad([loss.squeeze()], [x.squeeze(0)], retain_graph=True, allow_unused=True)
# if grad_tmp[0] is None:
# raise ValueError('gradient is None', grad_tmp[0])
########################################################################
# PyTorch 0.3.1 code; this basically does what .backward() does
# However, it also checks the stopping criteria
#x.grad = torch.zeros(1,2)
#prev_loss_numpy = prev_loss
# curr_loss_numpy = curr_loss.detach()
# ftol = (prev_loss - curr_loss) / max(1, np.abs(prev_loss), np.abs(curr_loss_numpy)) if prev_loss is not None else 1
# if torch.isnan(x.grad).any() or (ftol < 1e-9):
# break
# prev_x = x.clone()
########################################################################
loss.backward(retain_graph=True)
if torch.isnan(x.grad).any():
raise ValueError('Encountered NaN in x.grad')
curr_loss = curr_loss.detach().numpy()
ftol = (prev_loss - curr_loss) / max(1, np.abs(prev_loss), np.abs(curr_loss)) if prev_loss is not None else 1
if ftol < 1e-9:
break
optimizer.step()
prev_loss = curr_loss
if bounds is not None and out_of_bounds(x):
x = prev_x
break
###--------------------------------------------------###
optimum_loc = x.clone()
optimum_value = -acquisition(x, reference=reference, inferences=inferences, acquisition_function=acquisition_function, in_optimization=True).item()
return optimum_loc, optimum_value
def deepcopy_inference(inference, param_samples):
inferences = []
for s in range(param_samples.size(0)):
model = copy.deepcopy(inference.model)
deepcopied_inference = inference.__class__((inference.train_x, inference.train_y), model)
deepcopied_inference.cholesky_update(param_samples[s])
inferences.append(deepcopied_inference)
return inferences
def acquisition(x, reference, inferences, acquisition_function=expected_improvement, in_optimization=False):
acquisition_sample_list = []
numerically_stable_list = []
zero_pred_var_list = []
for s in range(len(inferences)):
pred_dist = inferences[s].predict(x, in_optimization=in_optimization)
pred_mean_sample = pred_dist[0]
pred_var_sample = pred_dist[1]
numerically_stable_list.append(pred_dist[2])
zero_pred_var_list.append(pred_dist[3])
acquisition_sample_list.append(acquisition_function(pred_mean_sample[:, 0], pred_var_sample[:, 0], reference=reference))
sample_info = (np.sum(numerically_stable_list), np.sum(zero_pred_var_list), len(numerically_stable_list))
if in_optimization:
return torch.stack(acquisition_sample_list, 1).sum(1, keepdim=True)
else:
return torch.stack(acquisition_sample_list, 1).sum(1, keepdim=True), sample_info
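# For orientation: expected_improvement is imported from acquisition_functions and its
# exact form is not shown here. A generic expected-improvement for minimisation against
# a reference value, in the same torch style, is sketched below; the name and details
# are illustrative, not the repo's implementation.
def expected_improvement_sketch(pred_mean, pred_var, reference):
    std = torch.clamp(pred_var, min=1e-12) ** 0.5
    z = (reference - pred_mean) / std
    normal = torch.distributions.Normal(torch.zeros_like(z), torch.ones_like(z))
    # EI = (ref - mu) * Phi(z) + sigma * phi(z)
    return (reference - pred_mean) * normal.cdf(z) + std * torch.exp(normal.log_prob(z))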
def mean_std_var(x, inferences):
mean_sample_list = []
std_sample_list = []
var_sample_list = []
stdmax_sample_list = []
varmax_sample_list = []
for s in range(len(inferences)):
pred_dist = inferences[s].predict(x)
pred_mean_sample = pred_dist[0]
pred_var_sample = pred_dist[1]
pred_std_sample = pred_var_sample ** 0.5
varmax_sample = torch.exp(inferences[s].log_kernel_amp())
stdmax_sample = varmax_sample ** 0.5
mean_sample_list.append(pred_mean_sample.data)
std_sample_list.append(pred_std_sample.data)
var_sample_list.append(pred_var_sample.data)
stdmax_sample_list.append(stdmax_sample.data)
varmax_sample_list.append(varmax_sample.data)
return torch.cat(mean_sample_list, 1).mean(1, keepdim=True), \
torch.cat(std_sample_list, 1).mean(1, keepdim=True), \
torch.cat(var_sample_list, 1).mean(1, keepdim=True), \
torch.cat(stdmax_sample_list).mean(0, keepdim=True), \
torch.cat(varmax_sample_list).mean(0, keepdim=True)
def optimization_candidates(input, output, lower_bnd, upper_bnd):
ndim = input.size(1)
min_ind = torch.min(output.data, 0)[1]
x0_spray = input.data[min_ind].view(1, -1).repeat(N_SPRAY, 1) + input.data.new(N_SPRAY, ndim).normal_() * 0.001 * (upper_bnd - lower_bnd)
if hasattr(lower_bnd, 'size'):
x0_spray[x0_spray < lower_bnd] = 2 * lower_bnd.view(1, -1).repeat(2 * N_SPRAY, 1) - x0_spray[x0_spray < lower_bnd]
else:
x0_spray[x0_spray < lower_bnd] = 2 * lower_bnd - x0_spray[x0_spray < lower_bnd]
if hasattr(upper_bnd, 'size'):
x0_spray[x0_spray > upper_bnd] = 2 * upper_bnd.view(1, -1).repeat(2 * N_SPRAY, 1) - x0_spray[x0_spray > upper_bnd]
else:
x0_spray[x0_spray > upper_bnd] = 2 * upper_bnd - x0_spray[x0_spray > upper_bnd]
if ndim <= 1100:
x0_spread = sobol_generate(ndim, N_SPREAD, np.random.randint(0, N_SPREAD)).type_as(input.data) * (upper_bnd - lower_bnd) + lower_bnd
else:
x0_spread = torch.FloatTensor(N_SPREAD, ndim).uniform_().type_as(input.data) * (upper_bnd - lower_bnd) + lower_bnd
x0 = torch.cat([input.data, x0_spray, x0_spread], 0)
nonzero_radius_mask = torch.sum(x0 ** 2, 1) > 0
nonzero_radius_ind = torch.sort(nonzero_radius_mask, 0, descending=True)[1][:torch.sum(nonzero_radius_mask)]
x0 = x0.index_select(0, nonzero_radius_ind)
return Variable(x0)
def optimization_init_points(candidates, reference, inferences, acquisition_function=expected_improvement):
start_time = time.time()
ndim = candidates.size(1)
acq_value, sample_info = acquisition(candidates, reference, inferences, acquisition_function, False)
acq_value = acq_value.data
nonnan_ind = acq_value == acq_value
acq_value = acq_value[nonnan_ind]
init_points = candidates.data[nonnan_ind.view(-1, 1).repeat(1, ndim)].view(-1, ndim)
_, sort_ind = torch.sort(acq_value, 0, descending=True)
is_maximum = acq_value == acq_value[sort_ind[0]]
n_equal_maximum = torch.sum(is_maximum)
print((('Initial points selection from %d points ' % candidates.size(0)) + time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))))
if n_equal_maximum > N_INIT:
shuffled_ind = torch.sort(torch.randn(n_equal_maximum), 0)[1]
return init_points[is_maximum.view(-1, 1).repeat(1, ndim)].view(-1, ndim)[(shuffled_ind < N_INIT).view(-1, 1).repeat(1, ndim)].view(-1, ndim), sample_info
else:
return init_points[sort_ind][:N_INIT], sample_info
def one_dim_plotting(ax1, ax2, inference, param_samples, color, ls='-', label='', title_str=''):
pred_x = torch.linspace(-2, 2, 100).view(-1, 1)
if param_samples.dim() == 1:
param_samples = param_samples.unsqueeze(0).clone()
n_samples = param_samples.size()[0]
pred_mean = 0
pred_var = 0
nll = 0
pred_std = 0
for s in range(n_samples):
pred_mean_sample, pred_var_sample = inference.predict(Variable(pred_x), param_samples[s])
pred_std_sample = torch.sqrt(pred_var_sample)
pred_mean += pred_mean_sample.data
pred_var += pred_var_sample.data
nll += inference.negative_log_likelihood(param_samples[s]).data.squeeze()[0]
pred_std += pred_std_sample.data
pred_mean /= n_samples
pred_var /= n_samples
nll /= n_samples
pred_std /= n_samples
ax1.plot(pred_x.numpy().flatten(), pred_mean.numpy().flatten(), color=color)
ax1.fill_between(pred_x.numpy().flatten(), (pred_mean - 1.96 * pred_std).numpy().flatten(),
(pred_mean + 1.96 * pred_std).numpy().flatten(), facecolor=color, alpha=0.2)
ax1.set_title(title_str + '\n%.4E' % nll)
acq = acquisition(Variable(pred_x), inference, param_samples, acquisition_function=expected_improvement,
reference=reference).data
# next_point = suggest(inference, param_samples_sampling, reference=reference).numpy()
# ax2.fill_between(pred_x.numpy().flatten(), 0, acq.numpy().flatten(), color=color, alpha=0.2, label=label)
ax2.plot(pred_x.numpy(), acq.numpy(), color=color, ls=ls, alpha=1.0, label=label)
ax2.legend()
# ax2.axvline(next_point, color=color, ls='--', alpha=0.5)
if __name__ == '__main__':
from HyperSphere.GP.kernels.modules.squared_exponential import SquaredExponentialKernel
from HyperSphere.GP.models.gp_regression import GPRegression
from HyperSphere.GP.inference.inference import Inference
import matplotlib.pyplot as plt
ndata = 6
ndim = 1
model_for_generating = GPRegression(kernel=SquaredExponentialKernel(ndim))
train_x = Variable(torch.FloatTensor(ndata, ndim).uniform_(-2, 2))
chol_L = torch.potrf(
(model_for_generating.kernel(train_x) + torch.diag(model_for_generating.likelihood(train_x))).data, upper=False)
train_y = model_for_generating.mean(train_x) + Variable(torch.mm(chol_L, torch.randn(ndata, 1)))
# train_y = torch.sin(2 * math.pi * torch.sum(train_x, 1, keepdim=True)) + Variable(torch.FloatTensor(train_x.size(0), 1).normal_())
train_data = (train_x, train_y)
param_original = model_for_generating.param_to_vec()
reference = torch.min(train_y.data)
model_for_learning = GPRegression(kernel=SquaredExponentialKernel(ndim))
inference = Inference(train_data, model_for_learning)
model_for_learning.vec_to_param(param_original)
param_samples_learning = inference.learning(n_restarts=10)
model_for_learning.vec_to_param(param_original)
param_samples_sampling = inference.sampling(n_sample=5, n_burnin=200, n_thin=10)
if ndim == 1:
ax11 = plt.subplot(221)
ax11.plot(train_x.data.numpy().flatten(), train_y.data.numpy().flatten(), 'k*')
ax11.axhline(reference, ls='--', alpha=0.5)
ax12 = plt.subplot(222, sharex=ax11, sharey=ax11)
ax12.plot(train_x.data.numpy().flatten(), train_y.data.numpy().flatten(), 'k*')
ax12.axhline(reference, ls='--', alpha=0.5)
ax21 = plt.subplot(223, sharex=ax11)
ax22 = plt.subplot(224, sharex=ax11)
# model_for_learning.elastic_vec_to_param(param_original, func)
# param_original_elastic = model_for_learning.param_to_vec()
# one_dim_plotting(axes[0, 0], axes[1, 0], inference, param_original, 'b')
# one_dim_plotting(axes[0, 1], axes[1, 1], inference, param_original_elastic, 'r')
step = 3
for i in range(-step, step+1):
if i == 0:
one_dim_plotting(ax11, ax21, inference, param_samples_learning, color='k', label='target')
one_dim_plotting(ax12, ax22, inference, param_samples_sampling, color='k', label='target')
else:
color = | np.random.rand(3) | numpy.random.rand |
"""
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Creation from RGB-D Selfies."
<NAME>*, <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
arXiv: https://arxiv.org/abs/2010.05562
Copyright (c) [2020] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import cv2, os, importlib, math
import os.path as osp
import numpy as np
import scipy.io as scio
import tensorflow as tf
def create_mtcnn_pb(sess):
pnet_fun = lambda img: sess.run(
("pnet/conv4-2/BiasAdd:0", "pnet/prob1:0"), feed_dict={"pnet/input:0": img}
)
rnet_fun = lambda img: sess.run(
("rnet/conv5-2/conv5-2:0", "rnet/prob1:0"), feed_dict={"rnet/input:0": img}
)
onet_fun = lambda img: sess.run(
("onet/conv6-2/conv6-2:0", "onet/conv6-3/conv6-3:0", "onet/prob1:0"),
feed_dict={"onet/input:0": img},
)
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
    threshold: threshold=[th1, th2, th3], th1-3 are the thresholds for the three detection stages
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count = 0
total_boxes = np.empty((0, 9))
points = np.empty(0)
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
# create scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
# first stage
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(
out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0]
)
# inter-scale nms
pick = nms(boxes.copy(), 0.5, "Union")
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, "Union")
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[
y[k] - 1 : ey[k], x[k] - 1 : ex[k], :
]
if (
tmp.shape[0] > 0
and tmp.shape[1] > 0
or tmp.shape[0] == 0
and tmp.shape[1] == 0
):
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
                return np.empty(0)  # degenerate crop; bail out with an empty result
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack(
[total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]
)
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, "Union")
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[
y[k] - 1 : ey[k], x[k] - 1 : ex[k], :
]
if (
tmp.shape[0] > 0
and tmp.shape[1] > 0
or tmp.shape[0] == 0
and tmp.shape[1] == 0
):
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
                return np.empty(0)  # degenerate crop; bail out with an empty result
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack(
[total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]
)
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = (
np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
)
points[5:10, :] = (
np.tile(h, (5, 1)) * points[5:10, :]
+ np.tile(total_boxes[:, 1], (5, 1)) - 1
)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
import numpy as np
import unittest
class TestLoDTensor(unittest.TestCase):
def test_pybind_recursive_seq_lens(self):
tensor = fluid.LoDTensor()
recursive_seq_lens = []
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
recursive_seq_lens = [[], [1], [3]]
self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
recursive_seq_lens)
recursive_seq_lens = [[0], [2], [3]]
self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
recursive_seq_lens)
recursive_seq_lens = [[1, 2, 3]]
tensor.set_recursive_sequence_lengths(recursive_seq_lens)
self.assertEqual(tensor.recursive_sequence_lengths(),
recursive_seq_lens)
tensor.set(np.random.random([6, 1]), fluid.CPUPlace())
import os
import pickle
import regex
import hashlib
from multiprocessing import Pool
import numpy as np
import soundfile as sf
from scipy import signal
from scipy.signal import get_window
from librosa.filters import mel
from numpy.random import RandomState
import argparse
from utils import *
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
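# Hedged usage sketch (not from the original pipeline): apply the high-pass
# filter above with zero-phase filtering. The 30 Hz cutoff and 16 kHz sample
# rate are illustrative assumptions.
def apply_highpass_example(wav, fs=16000, cutoff=30, order=5):
    # filtfilt runs the filter forward and backward, so no phase shift is introduced
    b, a = butter_highpass(cutoff, fs, order=order)
    return signal.filtfilt(b, a, wav)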
def pySTFT(x, fft_length=1024, hop_length=256):
x = np.pad(x, int(fft_length//2), mode='reflect')
noverlap = fft_length - hop_length
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//hop_length, fft_length)
strides = x.strides[:-1]+(hop_length*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
fft_window = get_window('hann', fft_length, fftbins=True)
result = np.fft.rfft(fft_window * result, n=fft_length).T
return np.abs(result)
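# Hedged sketch: a log-mel spectrogram built on top of pySTFT. The 16 kHz
# sample rate, 80 mel bands and the 90-7600 Hz band are illustrative
# assumptions, not values taken from this file.
def melspectrogram_example(wav, sr=16000, n_mels=80):
    mel_basis = mel(sr=sr, n_fft=1024, fmin=90, fmax=7600, n_mels=n_mels).T
    spec = pySTFT(wav).T                # magnitude spectrogram, (frames, 513)
    mel_spec = np.dot(spec, mel_basis)  # project onto the mel filterbank
    return np.log10(np.maximum(1e-5, mel_spec))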
import numpy as np
import os
import torch
import torchvision
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, random_split
from torchvision import transforms
import PIL.Image as Image
from sklearn.datasets import load_boston
np.random.seed(42)
torch.manual_seed(42)
#TODO: Add func for att imbalance
from torch.utils.data import Dataset
class custom_subset(Dataset):
r"""
Subset of a dataset at specified indices.
Arguments:
dataset (Dataset): The whole Dataset
indices (sequence): Indices in the whole set selected for subset
labels (sequence): targets for the given indices; must be the same length as indices
"""
def __init__(self, dataset, indices, labels):
self.dataset = torch.utils.data.Subset(dataset, indices)
self.targets = labels.type(torch.long)
def __getitem__(self, idx):
image = self.dataset[idx][0]
target = self.targets[idx]
return (image, target)
def __len__(self):
return len(self.targets)
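# Hedged usage sketch for custom_subset: wrap the first few examples of a
# torchvision dataset with their targets. The CIFAR10 root path, download flag
# and subset size are illustrative assumptions.
def custom_subset_example():
    base = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                        transform=transforms.ToTensor())
    indices = list(range(10))
    labels = torch.Tensor(base.targets)[indices]
    subset = custom_subset(base, indices, labels)
    return subset[0]  # (image_tensor, target) pair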
class DataHandler_CIFAR10(Dataset):
"""
Data Handler to load CIFAR10 dataset.
This class extends :class:`torch.utils.data.Dataset` to handle
loading data even without labels
Parameters
----------
X: numpy array
Data to be loaded
y: numpy array, optional
Labels to be loaded (default: None)
select: bool
True if loading data without labels, False otherwise
"""
def __init__(self, X, Y=None, select=True, use_test_transform = False):
"""
Constructor
"""
self.select = select
if(use_test_transform):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
if not self.select:
self.X = X
self.targets = Y
self.transform = transform
else:
self.X = X
self.transform = transform
def __getitem__(self, index):
if not self.select:
x, y = self.X[index], self.targets[index]
x = Image.fromarray(x)
x = self.transform(x)
return (x, y)
else:
x = self.X[index]
x = Image.fromarray(x)
x = self.transform(x)
return x
def __len__(self):
return len(self.X)
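# Hedged usage sketch: loading raw, unlabeled CIFAR10 arrays through the
# handler above. The batch size is an illustrative assumption; X_unlabeled is
# expected to be a uint8 array of shape (N, 32, 32, 3).
def datahandler_example(X_unlabeled):
    handler = DataHandler_CIFAR10(X_unlabeled, select=True, use_test_transform=True)
    loader = torch.utils.data.DataLoader(handler, batch_size=64, shuffle=False)
    return next(iter(loader))  # one batch of transformed images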
def create_random_train(fullset, split_cfg, num_cls, isnumpy, augVal):
np.random.seed(42)
full_idx = list(range(len(fullset)))
train_idx = list(np.random.choice(np.array(full_idx), size=split_cfg['train_size'], replace=False))
lake_idx = list(set(full_idx) - set(train_idx))
val_idx = list(np.random.choice(np.array(lake_idx), size=split_cfg['val_size'], replace=False))
lake_idx = list(set(lake_idx) - set(val_idx))
train_set = custom_subset(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx])
val_set = custom_subset(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx])
lake_set = custom_subset(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx])
return train_set, val_set, lake_set
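# Hedged example of the split_cfg dictionary expected by create_random_train;
# the sizes below are illustrative assumptions, not values from this project.
def random_split_example(fullset):
    split_cfg = {'train_size': 1000, 'val_size': 500}
    return create_random_train(fullset, split_cfg, num_cls=10, isnumpy=False, augVal=False)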
def create_uniform_train(fullset, split_cfg, num_cls, isnumpy, augVal):
np.random.seed(42)
train_idx = []
val_idx = []
lake_idx = []
for i in range(num_cls): #all_classes
full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy())
class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['train_size']//num_cls, replace=False))
remain_idx = list(set(full_idx_class) - set(class_train_idx))
train_idx += class_train_idx
lake_idx += remain_idx
val_idx = list(np.random.choice(np.array(lake_idx), size=split_cfg['val_size'], replace=False))
lake_idx = list(set(lake_idx) - set(val_idx))
train_set = custom_subset(fullset, train_idx, torch.Tensor(fullset.targets)[train_idx])
val_set = custom_subset(fullset, val_idx, torch.Tensor(fullset.targets)[val_idx])
lake_set = custom_subset(fullset, lake_idx, torch.Tensor(fullset.targets)[lake_idx])
return train_set, val_set, lake_set
def create_class_imb(fullset, split_cfg, num_cls, isnumpy, augVal):
np.random.seed(42)
train_idx = []
val_idx = []
lake_idx = []
selected_classes = np.random.choice(np.arange(num_cls), size=split_cfg['num_cls_imbalance'], replace=False) #classes to imbalance
for i in range(num_cls): #all_classes
full_idx_class = list(torch.where(torch.Tensor(fullset.targets) == i)[0].cpu().numpy())
if(i in selected_classes):
class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_imbclass_train'], replace=False))
remain_idx = list(set(full_idx_class) - set(class_train_idx))
class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_val'], replace=False))
remain_idx = list(set(remain_idx) - set(class_val_idx))
class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_imbclass_lake'], replace=False))
else:
class_train_idx = list(np.random.choice(np.array(full_idx_class), size=split_cfg['per_class_train'], replace=False))
remain_idx = list(set(full_idx_class) - set(class_train_idx))
class_val_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_val'], replace=False))
remain_idx = list(set(remain_idx) - set(class_val_idx))
class_lake_idx = list(np.random.choice(np.array(remain_idx), size=split_cfg['per_class_lake'], replace=False)) # key name assumed by analogy with per_imbclass_lake above
import unittest
import backend as F
import numpy as np
import gzip
import tempfile
import os
import pandas as pd
import yaml
import pytest
import dgl.data as data
import dgl.data.csv_dataset as csv_ds
from dgl import DGLError
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_minigc():
ds = data.MiniGCDataset(16, 10, 20)
g, l = list(zip(*ds))
print(g, l)
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_gin():
ds_n_graphs = {
'MUTAG': 188,
'IMDBBINARY': 1000,
'IMDBMULTI': 1500,
'PROTEINS': 1113,
'PTC': 344,
}
for name, n_graphs in ds_n_graphs.items():
ds = data.GINDataset(name, self_loop=False, degree_as_nlabel=False)
assert len(ds) == n_graphs, (len(ds), name)
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_fraud():
g = data.FraudDataset('amazon')[0]
assert g.num_nodes() == 11944
g = data.FraudAmazonDataset()[0]
assert g.num_nodes() == 11944
g = data.FraudYelpDataset()[0]
assert g.num_nodes() == 45954
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_fakenews():
ds = data.FakeNewsDataset('politifact', 'bert')
assert len(ds) == 314
ds = data.FakeNewsDataset('gossipcop', 'profile')
assert len(ds) == 5464
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_tudataset_regression():
ds = data.TUDataset('ZINC_test', force_reload=True)
assert len(ds) == 5000
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_data_hash():
class HashTestDataset(data.DGLDataset):
def __init__(self, hash_key=()):
super(HashTestDataset, self).__init__(
'hashtest', hash_key=hash_key)
def _load(self):
pass
a = HashTestDataset((True, 0, '1', (1, 2, 3)))
b = HashTestDataset((True, 0, '1', (1, 2, 3)))
c = HashTestDataset((True, 0, '1', (1, 2, 4)))
assert a.hash == b.hash
assert a.hash != c.hash
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_citation_graph():
# cora
g = data.CoraGraphDataset()[0]
assert g.num_nodes() == 2708
assert g.num_edges() == 10556
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# Citeseer
g = data.CiteseerGraphDataset()[0]
assert g.num_nodes() == 3327
assert g.num_edges() == 9228
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# Pubmed
g = data.PubmedGraphDataset()[0]
assert g.num_nodes() == 19717
assert g.num_edges() == 88651
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_gnn_benchmark():
# AmazonCoBuyComputerDataset
g = data.AmazonCoBuyComputerDataset()[0]
assert g.num_nodes() == 13752
assert g.num_edges() == 491722
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# AmazonCoBuyPhotoDataset
g = data.AmazonCoBuyPhotoDataset()[0]
assert g.num_nodes() == 7650
assert g.num_edges() == 238163
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# CoauthorPhysicsDataset
g = data.CoauthorPhysicsDataset()[0]
assert g.num_nodes() == 34493
assert g.num_edges() == 495924
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# CoauthorCSDataset
g = data.CoauthorCSDataset()[0]
assert g.num_nodes() == 18333
assert g.num_edges() == 163788
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
# CoraFullDataset
g = data.CoraFullDataset()[0]
assert g.num_nodes() == 19793
assert g.num_edges() == 126842
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_reddit():
# RedditDataset
g = data.RedditDataset()[0]
assert g.num_nodes() == 232965
assert g.num_edges() == 114615892
dst = F.asnumpy(g.edges()[1])
assert np.array_equal(dst, np.sort(dst))
@unittest.skipIf(F._default_context_str == 'gpu', reason="Datasets don't need to be tested on GPU.")
def test_extract_archive():
# gzip
with tempfile.TemporaryDirectory() as src_dir:
gz_file = 'gz_archive'
gz_path = os.path.join(src_dir, gz_file + '.gz')
content = b"test extract archive gzip"
with gzip.open(gz_path, 'wb') as f:
f.write(content)
with tempfile.TemporaryDirectory() as dst_dir:
data.utils.extract_archive(gz_path, dst_dir, overwrite=True)
assert os.path.exists(os.path.join(dst_dir, gz_file))
def _test_construct_graphs_homo():
# node_ids could be non-sorted, duplicated, not labeled from 0 to num_nodes-1
num_nodes = 100
num_edges = 1000
num_dims = 3
num_dup_nodes = int(num_nodes*0.2)
node_ids = np.random.choice(
np.arange(num_nodes*2), size=num_nodes, replace=False)
assert len(node_ids) == num_nodes
np.random.shuffle(node_ids)
node_ids = np.hstack((node_ids, node_ids[:num_dup_nodes]))
t_ndata = {'feat': np.random.rand(num_nodes+num_dup_nodes, num_dims),
'label': np.random.randint(2, size=num_nodes+num_dup_nodes)}
_, u_indices = np.unique(node_ids, return_index=True)
ndata = {'feat': t_ndata['feat'][u_indices],
'label': t_ndata['label'][u_indices]}
node_data = csv_ds.NodeData(node_ids, t_ndata)
src_ids = np.random.choice(node_ids, size=num_edges)
dst_ids = np.random.choice(node_ids, size=num_edges)
edata = {'feat': np.random.rand(
num_edges, num_dims), 'label': np.random.randint(2, size=num_edges)}
edge_data = csv_ds.EdgeData(src_ids, dst_ids, edata)
graphs, data_dict = csv_ds.DGLGraphConstructor.construct_graphs(
node_data, edge_data)
assert len(graphs) == 1
assert len(data_dict) == 0
g = graphs[0]
assert g.is_homogeneous
assert g.num_nodes() == num_nodes
assert g.num_edges() == num_edges
def assert_data(lhs, rhs):
for key, value in lhs.items():
assert key in rhs
assert F.array_equal(F.tensor(value), rhs[key])
assert_data(ndata, g.ndata)
assert_data(edata, g.edata)
def _test_construct_graphs_hetero():
# node_ids could be non-sorted, duplicated, not labeled from 0 to num_nodes-1
num_nodes = 100
num_edges = 1000
num_dims = 3
num_dup_nodes = int(num_nodes*0.2)
ntypes = ['user', 'item']
node_data = []
node_ids_dict = {}
ndata_dict = {}
for ntype in ntypes:
node_ids = np.random.choice(
np.arange(num_nodes*2), size=num_nodes, replace=False)
assert len(node_ids) == num_nodes
np.random.shuffle(node_ids)
node_ids = np.hstack((node_ids, node_ids[:num_dup_nodes]))
t_ndata = {'feat': np.random.rand(num_nodes+num_dup_nodes, num_dims),
'label': np.random.randint(2, size=num_nodes+num_dup_nodes)}
# -*- coding:utf-8 -*-
"""
File Name: inferece3_RulerDivDet.py
Description : DF step 3: Retinanet project - ruler and ruler-division detection
Author : royce.mao
date: 2019/09/02
"""
import argparse
import skimage
import os
import cv2
import tqdm
import time
import torch
import numpy as np
import tensorflow as tf
from utils import load_image
from torchvision import ops
from PIL import Image
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
MEAN_RET = np.array([[[0.485, 0.456, 0.406]]])
STD_RET = np.array([[[0.229, 0.224, 0.225]]])
MEAN_RES = (0.49139968, 0.48215827, 0.44653124)
STD_RES = (0.24703233, 0.24348505, 0.26158768)
# CLASS_MAPPING = {"6":0, "7":1}
RET_MAPPING = {"div":0}
RES_MAPPING = {'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'back': 10}
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(DEVICE, 'is available!')
# Retinanet inference
class Detection(object):
def __init__(self,):
# model
self.model_ret = torch.load(parser.ret_weights) # cfg.RET_WEIGHTS
self.model_ret.eval()
self.model_ret.to(DEVICE)
# mapping
self.labels = {}
for key, value in RET_MAPPING.items():
self.labels[value] = key
super(Detection, self).__init__()
def build_transform(self, image, min_size=608, max_size=1024):
"""
Resize and normalize the image into the network input format.
:param image: numpy(H,W,C)
:param min_size:
:param max_size:
:return: tensor(B,C,H,W), scale factor
"""
H, W, C = image.shape
scale1 = min_size / min(H, W)
scale2 = max_size / max(H, W)
scale = min(scale1, scale2)
# resize the image with the computed scale
img = skimage.transform.resize(image, (int(round(H * scale)), int(round((W * scale)))), mode='constant')
img -= MEAN_RET
img /= STD_RET
new_H, new_W, new_C = img.shape
pad_H = 32 - new_H % 32
pad_W = 32 - new_W % 32
new_image = np.zeros((new_H + pad_H, new_W + pad_W, new_C)).astype(np.float32)
new_image[:new_H, :new_W, :] = img.astype(np.float32)
new_image = np.expand_dims(new_image, axis=0) # add batch dim
return torch.from_numpy(new_image).permute(0, 3, 1, 2), scale
def unbuild_transform(self, image, boxes, scale):
"""
Undo the transform, mapping the image and boxes back to the original scale.
:param image: tensor(C,H,W)
:param boxes: 2-D tensor (num_div, (x1,y1,x2,y2))
:param scale:
:return: numpy(H,W,C), 2-D numpy array (num_div, (x1,y1,x2,y2) restored to original-image coordinates)
"""
# restore the pixel values of the image
for t, m, s in zip(image, MEAN_RET[0][0], STD_RET[0][0]):
t.mul_(s).add_(m)
img = np.array(255 * image).copy()
img[img < 0] = 0
img[img > 255] = 255
# rescale the boxes back to the original image size
boxes[:, :4] /= scale
# rescale the image back to its original size
C, H, W = img.shape
img = np.transpose(img, (1, 2, 0))
img = skimage.transform.resize(img, (int(round(H / scale)), int(round((W / scale)))), mode='constant')
# img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)
return img, boxes
def __call__(self, image_path, image_name):
"""
:param image_path:
:return:
"""
coords = []
with torch.no_grad():
# build the network input
img_tensor, scale = self.build_transform(load_image(image_path))
# forward pass through the network
start_time = time.time()
scores, classification, proposals = self.model_ret(img_tensor.to(DEVICE).float())
time_ret = time.time() - start_time
# nms
keep = ops.nms(proposals, scores, 0.01) # fixed IoU threshold
# unbuild_transform
idxs = np.where(scores.cpu().numpy() > parser.threshold) # todo: score-threshold filtering
img_restore, boxes_restore = self.unbuild_transform(img_tensor[0].cpu(), proposals[keep], scale)
for i in range(idxs[0].shape[0]):
try:
bbox = boxes_restore[idxs[0][i], :]
x1 = int(bbox[0])
y1 = int(bbox[1])
x2 = int(bbox[2])
y2 = int(bbox[3])
#
coords.append([x1,y1,x2,y2])
except Exception as e:
return None, None, time_ret
return np.array(coords), img_restore, time_ret
# ResNet18 refines the predicted labels
class RoiAlign(object):
def __init__(self):
""" """
super(RoiAlign, self).__init__()
self.model_res = torch.load(parser.res_weights) # cfg.RES_WEIGHTS
self.model_res.eval()
self.model_res.to(DEVICE)
# mapping
self.labels = {}
for key, value in RES_MAPPING.items():
self.labels[value] = key
# transforms
self.test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(MEAN_RES, STD_RES)
])
def xyxy2yxyx(self, coords, img):
"""
:param coords: list, e.g. [[x1,y1,x2,y2], ...]
:param img: numpy 需要crop的img对象
:return:
"""
H, W = img.shape[1:3]
coords = np.array(coords, dtype=np.float32)
print(coords)
coords[:, ::2] = coords[:, ::2] / W
coords[:, 1::2] = coords[:, 1::2] / H
return coords[:, [1, 0, 3, 2]]
def crop_and_resize(self, crop_size, coords, img):
"""
:param coords:
:param img:
:return: labels (pred list)
"""
# crop and resize
img = np.expand_dims(img, axis=0)
boxes = self.xyxy2yxyx(coords, img)
divs = tf.image.crop_and_resize(img, boxes, box_ind=[0] * len(boxes), crop_size=parser.crop_size)
sess = tf.Session()
divs_img = divs.eval(session=sess) # convert to numpy
divs_img = divs_img.astype('uint8')
# infer
# print(divs_img.shape)
divs_tensor_list = [self.test_transforms(Image.fromarray(div_img)) for div_img in divs_img]
divs_tensor = torch.stack(divs_tensor_list)
with torch.no_grad():
start_time = time.time()
logits = self.model_res(divs_tensor.to(DEVICE))
time_res = time.time() - start_time
preds = logits.max(1, keepdim=True)[1]
labels = [self.labels[pred.item()] for pred in preds]
return labels, time_res
def draw_caption(image, box, caption):
b = np.array(box).astype(int)
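# Hedged end-to-end sketch: detect ruler divisions with the Retinanet wrapper,
# then classify each crop with the ResNet18 head. The image path is an
# illustrative assumption, and `parser` must already hold the parsed
# command-line arguments used by the classes above.
def ruler_pipeline_example(image_path='demo.jpg'):
    detector = Detection()
    roi_head = RoiAlign()
    coords, img_restore, _ = detector(image_path, os.path.basename(image_path))
    if coords is None or len(coords) == 0:
        return []
    labels, _ = roi_head.crop_and_resize(parser.crop_size, coords, img_restore)
    return list(zip(coords.tolist(), labels))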
from functools import partial
import numpy as np
from matplotlib import pyplot as plt
from os.path import expanduser
# from https://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gpr_noisy_targets.html
def plot_prediction_uncertainty_and_data(x, f, X, y, y_pred, sigma=None, ylim=None, xlim=None, title='', filename='regression_results.png'):
plt.clf()
plt.figure()
plt.plot(x, f(x), 'r:', label=r'$f(x) = objective$')
plt.plot(X, y, 'r.', markersize=10, label='Observations')
if isinstance(y_pred, (tuple, list, np.ndarray)) and isinstance(y_pred[0], (tuple, list, np.ndarray)) and len(y_pred[0]) > 1:
for row_index, y_pred_row in enumerate(y_pred):
plt.plot(x, y_pred_row, 'b-', label='Prediction' if row_index == 0 else None)
else:
plt.plot(x, y_pred, 'b-', label='Prediction')
if sigma is not None:
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
if ylim is not None:
plt.ylim(ylim[0], ylim[1])
if xlim is not None:
plt.xlim(xlim[0], xlim[1])
plt.legend(loc='upper left')
plt.title(title)
plt.savefig(expanduser('~/Downloads/' + filename), dpi=300)
plt.show()
def gaussian(X1, X2, widths=None):
sqdist = np.sum(X1 ** 2, 1).reshape(-1, 1) + np.sum(X2 ** 2, 1) - 2 * np.dot(X1, X2.T)
return np.exp(-0.5 / np.array(widths) ** 2 * sqdist)
def dot_product(X1, X2):
return np.outer(X1, X2)
def kernel(X1, X2=None, widths=None, noise_parameter=0.0, mean=0.0, add_constant=False, _normalize=True, multiplier=1):
"""
Isotropic squared exponential kernel.
Args:
X1: Array of m points (m x d).
X2: Array of n points (n x d).
Returns:
(m x n) matrix.
"""
if X2 is None:
self_kernel = True
X2 = X1
else:
self_kernel = False
X2 = X2
core_kernel = gaussian(X1, X2, widths=widths)
if self_kernel:
white_noise = np.eye(len(core_kernel)) * noise_parameter
constant = (np.ones(core_kernel.shape)) * mean
else:
white_noise = np.zeros(core_kernel.shape)
constant = np.ones(core_kernel.shape) * mean
unnormalized_kernel = core_kernel + white_noise ** 2 + constant
if _normalize:
normalized_kernel = (unnormalized_kernel.T / np.sqrt(np.diag(
kernel(X1, widths=widths, noise_parameter=0, mean=0, add_constant=add_constant, _normalize=False)))).T
normalized_kernel = normalized_kernel / np.sqrt(
np.diag(kernel(X2, widths=widths, noise_parameter=0, mean=0, add_constant=add_constant, _normalize=False)))
if add_constant:
return multiplier * np.hstack([np.ones((len(normalized_kernel), 1)), normalized_kernel])
else:
return multiplier * normalized_kernel
else:
if add_constant:
return multiplier * np.hstack([np.ones((len(unnormalized_kernel), 1)), unnormalized_kernel])
else:
return multiplier * unnormalized_kernel
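# Hedged example: the kernel above evaluated on a few 1-D points yields a
# normalized Gram matrix whose diagonal is 1 plus the squared noise term.
# The width and noise values are illustrative assumptions.
def kernel_example():
    X_demo = np.linspace(0, 1, 5).reshape(-1, 1)
    K_demo = kernel(X_demo, widths=[0.5], noise_parameter=0.1, mean=0.0)
    return K_demo.shape, np.diag(K_demo)  # (5, 5) and values of about 1 + 0.1 ** 2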
######
# Settings
######
compare_specialized_modules = False # this requires access to our libraries which aren't yet public
basis_domain = (-5, 5)
plot_domain = (-1, 2)
training_domain = (0, 1)
ylim = (-2, 2)
widths = [0.5]
wavelength = 0.5
phase = 0
vertical_shift = 0
objective_scale = 5
noise_parameter_scale = 0.1
number_of_basis_functions = 100
number_of_evaluation_points = 500
number_of_training_points = 20
fit_intercept = True
constant_kernel_weight = 1e5
plot_range = (-4 * objective_scale + vertical_shift, 4 * objective_scale + vertical_shift)
render_plots = True
#####
def objective(X):
return objective_scale * np.sin((X / wavelength + phase) * 2 * np.pi) + vertical_shift
def get_average_subsequent_differences_in_list(ini_list):
diff_list = []
for x, y in zip(ini_list[0::], ini_list[1::]):
diff_list.append(y - x)
return np.average(diff_list)
number_of_dirac_basis_functions = max(1000, number_of_basis_functions)
basis_centers = np.linspace(basis_domain[0], basis_domain[1], number_of_basis_functions).reshape(-1, 1)
dirac_basis_centers = np.linspace(basis_domain[0], basis_domain[1], number_of_dirac_basis_functions).reshape(-1, 1)
dirac_basis_increment = get_average_subsequent_differences_in_list(dirac_basis_centers)
X = np.linspace(plot_domain[0], plot_domain[1], number_of_evaluation_points).reshape(-1, 1)
X_train = np.linspace(training_domain[0], training_domain[1], number_of_training_points).reshape(-1, 1)
Y_train = objective(X_train).reshape(-1, 1)
noise_parameter = max(1e-5, noise_parameter_scale) * objective_scale
prior_noise_parameter = objective_scale
GP_RBF_kernel = partial(kernel, widths=widths, noise_parameter=1e-3, mean=constant_kernel_weight)
GP_kernel = partial(kernel, widths=widths, noise_parameter=noise_parameter / objective_scale,
mean=constant_kernel_weight, multiplier=objective_scale ** 2)
incorrect_kernel = partial(kernel, widths=[x / 5 for x in widths], noise_parameter=noise_parameter)
BLR_basis = partial(kernel, X2=basis_centers,
widths=[x / np.sqrt(2) for x in widths],
mean=0.0,
add_constant=fit_intercept) # note that noise_parameter only applies if X2 is None
dirac_basis = partial(kernel, X2=dirac_basis_centers,
widths=[dirac_basis_increment for x in widths],
mean=0.0) # note that noise_parameter only applies if X2 is None
def normalize_basis(basis, apply_sqrt=True, ignore_first_column=False):
if ignore_first_column:
basis_for_norm = basis[:, 1:]
output_basis = basis.copy()
output_basis[:, 1:] = (basis[:, 1:].T / np.sqrt(np.diag(basis_for_norm @ basis_for_norm.T))).T
return output_basis
else:
return (basis.T / np.sqrt(np.diag(basis @ basis.T))).T
# regular (manual) basis
X_train_basis = BLR_basis(X_train)
X_basis = BLR_basis(X)
normalization_constant = np.average(np.diag(X_train_basis @ X_train_basis.T)) ** -0.5
X_train_basis = normalize_basis(X_train_basis, ignore_first_column=fit_intercept)
X_basis = normalize_basis(X_basis, ignore_first_column=fit_intercept)
# apply eigenbasis
K = GP_RBF_kernel(dirac_basis_centers)
eigenvalues, eigenvectors = np.linalg.eigh(K)
eigenbasis = eigenvectors.T
X_train_dirac_basis = dirac_basis(X_train)
X_dirac_basis = dirac_basis(X)
X_train_dirac_basis = np.square(normalize_basis(X_train_dirac_basis))
X_dirac_basis = np.square(normalize_basis(X_dirac_basis))
X_train_eigenbasis = X_train_dirac_basis @ eigenbasis.T @ np.diag(np.sqrt(eigenvalues))
X_eigenbasis = X_dirac_basis @ eigenbasis.T @ np.diag(np.sqrt(eigenvalues))
eigenvalues, eigenvectors = np.linalg.eigh(noise_parameter ** -2 * X_train_basis.T @ X_train_basis)
number_of_effective_parameters = sum(x / (prior_noise_parameter ** -2 + x) for x in np.real(eigenvalues))
print(f'number of effective parameters (gamma): {number_of_effective_parameters}')
print(f'number of training points: {number_of_training_points}')
######
# BLR
######
if fit_intercept:
number_of_features_with_intercept = number_of_basis_functions + 1
else:
number_of_features_with_intercept = number_of_basis_functions
regularization_matrix = noise_parameter ** 2 * prior_noise_parameter ** -2 * np.eye(number_of_features_with_intercept)
if fit_intercept:
regularization_matrix[0, 0] = 0
S_matrix = np.linalg.pinv(X_train_basis.T @ X_train_basis + regularization_matrix)
beta_means = S_matrix @ X_train_basis.T @ Y_train
BLR_predictions = X_basis @ beta_means
BLR_predictions_at_X_train = X_train_basis @ beta_means
BLR_cov = noise_parameter ** 2 * (X_basis @ S_matrix @ X_basis.T + np.eye(len(X_basis)))
BLR_sigmas = np.sqrt(np.abs(np.diag(BLR_cov))).reshape(-1, 1)
empirical_noise_parameter = np.sqrt(np.sum((X_train_basis @ beta_means - Y_train) ** 2) / len(BLR_predictions))
######
# BLR -- Eigenbasis
######
prior_covariance_eigen_matrix = prior_noise_parameter ** 2 * np.eye(len(X_train_eigenbasis.T))
covariance_eigen_matrix = np.linalg.pinv(
noise_parameter ** -2 * X_train_eigenbasis.T @ X_train_eigenbasis + np.linalg.pinv(prior_covariance_eigen_matrix))
beta_eigen_means = noise_parameter ** -2 * covariance_eigen_matrix @ X_train_eigenbasis.T @ Y_train
BLR_eigen_predictions = X_eigenbasis @ beta_eigen_means
BLR_eigen_predictions_at_X_train = X_train_eigenbasis @ beta_eigen_means
BLR_eigen_cov = X_eigenbasis @ covariance_eigen_matrix @ X_eigenbasis.T + noise_parameter ** 2 * np.eye(
len(X_eigenbasis))
BLR_eigen_sigmas = np.sqrt(np.abs(np.diag(BLR_eigen_cov))).reshape(-1, 1)
eigen_empirical_noise_parameter = np.sqrt(
np.sum((X_train_eigenbasis @ beta_eigen_means - Y_train) ** 2) / len(BLR_eigen_predictions))
######
# BLR - Specialized modules
######
if compare_specialized_modules:
from src.bayes_linear_regressor import BayesLinearRegressor, BayesLinearAlgebraLinearRegressor
from src.support_regressors import BasisAdapter, TuningAdapter
# note that l2_regularization_constant is alpha/beta in http://krasserm.github.io/2019/02/23/bayesian-linear-regression/
# that is, with alpha = prior_noise_parameter ** -2 = 1,
# l2_regularization_constant = noise_parameter ** 2
BLR_learn_sigma_and_prior_noise_parameter_object = BasisAdapter(
regressor=BayesLinearRegressor(
l2_regularization_constant=None,
fixed_prior_noise_parameter=None,
fixed_noise_parameter=None,
fit_intercept=fit_intercept
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLR_learn_sigma_and_prior_noise_parameter_object.fit(X_train, Y_train)
BLR_learn_sigma_and_prior_noise_parameter_object_predictions, \
BLR_learn_sigma_and_prior_noise_parameter_object_sigmas = \
BLR_learn_sigma_and_prior_noise_parameter_object.predict(X, return_std=True)
BLR_learn_sigma_object = BasisAdapter(
regressor=BayesLinearRegressor(
l2_regularization_constant=None,
fixed_prior_noise_parameter=prior_noise_parameter,
fixed_noise_parameter=None,
fit_intercept=fit_intercept
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLR_learn_sigma_object.fit(X_train, Y_train)
BLR_learn_sigma_object_predictions, BLR_learn_sigma_object_sigmas = BLR_learn_sigma_object.predict(X,
return_std=True)
BLR_learn_sigma_evidence_object = TuningAdapter(
regressor=BasisAdapter,
regressor_keyword_arguments={
'regressor': BayesLinearRegressor,
'l2_regularization_constant': None,
'fixed_prior_noise_parameter': prior_noise_parameter,
'fixed_noise_parameter': None,
'fit_intercept': fit_intercept,
'domains': [basis_domain],
'sampling_factors': number_of_basis_functions,
'widths': [x / np.sqrt(2) for x in widths]
},
hyperparameter_domains={
'fixed_noise_parameter': [1e-5, 10]
}
)
BLR_learn_sigma_evidence_object.fit(X_train, Y_train)
BLR_learn_sigma_evidence_object_predictions, BLR_learn_sigma_evidence_object_sigmas = BLR_learn_sigma_evidence_object.predict(
X,
return_std=True)
BLR_learn_sigma_and_width_evidence_object = TuningAdapter(
regressor=BasisAdapter,
regressor_keyword_arguments={
'regressor': BayesLinearRegressor,
'l2_regularization_constant': None,
'fixed_prior_noise_parameter': prior_noise_parameter,
'fixed_noise_parameter': None,
'fit_intercept': fit_intercept,
'domains': [basis_domain],
'sampling_factors': number_of_basis_functions,
'widths': [x / np.sqrt(2) for x in widths]
},
hyperparameter_domains={
'fixed_noise_parameter': [1e-5, 10],
'widths': [[1e-2, 10]]
}
)
BLR_learn_sigma_and_width_evidence_object.fit(X_train, Y_train)
BLR_learn_sigma_and_width_evidence_object_predictions, BLR_learn_sigma_and_width_evidence_object_sigmas = BLR_learn_sigma_and_width_evidence_object.predict(
X,
return_std=True)
BLR_learn_prior_noise_parameter_object = BasisAdapter(
regressor=BayesLinearRegressor(
l2_regularization_constant=None,
fixed_prior_noise_parameter=None,
fixed_noise_parameter=noise_parameter,
fit_intercept=fit_intercept
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLR_learn_prior_noise_parameter_object.fit(X_train, Y_train)
BLR_learn_prior_noise_parameter_object_predictions, BLR_learn_prior_noise_parameter_object_sigmas = BLR_learn_prior_noise_parameter_object.predict(
X, return_std=True)
BLR_fixed_regularization_learn_sigma_object = BasisAdapter(
regressor=BayesLinearRegressor(
l2_regularization_constant=1e-1, # noise_parameter**2 / prior_noise_parameter**2,
fixed_prior_noise_parameter=None,
fixed_noise_parameter=None,
fit_intercept=fit_intercept,
use_empirical_noise_parameter=False
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLR_fixed_regularization_learn_sigma_object.fit(X_train, Y_train)
BLR_fixed_regularization_learn_sigma_object_predictions, \
BLR_fixed_regularization_learn_sigma_object_sigmas = \
BLR_fixed_regularization_learn_sigma_object.predict(X, return_std=True)
BLR_fixed_prior_noise_parameter_object = BasisAdapter(
regressor=BayesLinearRegressor(
l2_regularization_constant=None,
fixed_prior_noise_parameter=prior_noise_parameter,
fixed_noise_parameter=noise_parameter,
fit_intercept=fit_intercept
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLR_fixed_prior_noise_parameter_object.fit(X_train, Y_train)
BLR_fixed_prior_noise_parameter_object_predictions, BLR_fixed_prior_noise_parameter_object_sigmas = BLR_fixed_prior_noise_parameter_object.predict(
X, return_std=True)
BLR_object = BasisAdapter(
regressor=BayesLinearRegressor(
l2_regularization_constant=noise_parameter ** 2 / prior_noise_parameter ** 2,
fixed_prior_noise_parameter=None,
fixed_noise_parameter=noise_parameter,
fit_intercept=fit_intercept
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLR_object_prior_predictions, BLR_object_prior_sigmas = BLR_object.predict(X, return_std=True)
BLR_object_prior_sample_y = [BLR_object.sample_y(X) for _ in range(10)]
BLR_object.fit(X_train, Y_train)
BLR_object_predictions, BLR_object_sigmas = BLR_object.predict(X, return_std=True)
BLR_object_posterior_sample_y = [BLR_object.sample_y(X) for _ in range(10)]
BLALR_object = BasisAdapter(
regressor=BayesLinearAlgebraLinearRegressor(
l2_regularization_constant=noise_parameter ** 2 / prior_noise_parameter ** 2,
fixed_prior_noise_parameter=None,
fixed_noise_parameter=noise_parameter,
fit_intercept=fit_intercept
),
domains=[basis_domain],
sampling_factors=number_of_basis_functions,
widths=[x / np.sqrt(2) for x in widths]
) # note that kernel_noise_parameter default is 0
BLALR_object.fit(X_train, Y_train)
BLALR_object_predictions, BLALR_object_sigmas = BLALR_object.predict(X, return_std=True)
# np.sum(np.abs(X_train_basis - BLR_object.transform(X_train))) / X_train_basis.size
# np.sum(np.abs(X_basis - BLR_object.transform(X))) / X_basis.size
train_index = round(X_train_basis.shape[0] / 2)
basis_index = round(X_train_basis.shape[1] / 2)
if render_plots:
plt.clf()
plt.figure()
y = BLR_object.transform(X_train[train_index]).T
print(np.max(y))
plt.plot(list(range(len(y))), y, 'r')
plt.show()
plt.clf()
plt.figure()
y = X_train_basis[train_index]
print(np.max(y))
plt.plot(list(range(len(y))), y, 'b')
plt.show()
######
# GP
######
K = GP_kernel(X_train)
K_s = GP_kernel(X_train, X)
K_ss = GP_kernel(X)
K_inv = np.linalg.pinv(K)
GP_predictions = K_s.T.dot(K_inv).dot(Y_train)
GP_cov = (K_ss - K_s.T.dot(K_inv).dot(K_s))
GP_sigmas = np.sqrt(np.abs(np.diag(GP_cov))).reshape(-1, 1)
######
# Plots
######
if render_plots:
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, GP_predictions, GP_sigmas, plot_range, plot_domain,
'Gaussian Process'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_predictions, BLR_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_eigen_predictions, BLR_eigen_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression (Eigenbasis)'
)
if compare_specialized_modules:
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_object_prior_predictions, BLR_object_prior_sigmas,
plot_range,
plot_domain,
'Bayesian Linear Regression Prior',
'prior_regression_results.png'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_object_prior_sample_y, None,
plot_range,
plot_domain,
'Bayesian Linear Regression Prior Samples',
'prior_samples.png'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_object_posterior_sample_y, None,
plot_range,
plot_domain,
'Bayesian Linear Regression Posterior Samples',
'posterior_samples.png'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_learn_sigma_object_predictions, BLR_learn_sigma_object_sigmas,
plot_range,
plot_domain,
'Bayesian Linear Regression with Learned $\sigma$ and Fixed $\sigma_p$'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_learn_sigma_evidence_object_predictions, BLR_learn_sigma_evidence_object_sigmas,
plot_range,
plot_domain,
'Bayesian Linear Regression with Learned $\sigma$ and Fixed $\sigma_p$ with Evidence'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_learn_sigma_and_width_evidence_object_predictions, BLR_learn_sigma_and_width_evidence_object_sigmas,
plot_range,
plot_domain,
'Bayesian Linear Regression with Learned $\sigma$ and $l$, Fixed $\sigma_p$ with Evidence'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_learn_prior_noise_parameter_object_predictions,
BLR_learn_prior_noise_parameter_object_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression with Fixed $\sigma$ and Learned $\sigma_p$'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_learn_sigma_and_prior_noise_parameter_object_predictions,
BLR_learn_sigma_and_prior_noise_parameter_object_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression with Learned $\sigma$ and $\sigma_p$'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_fixed_regularization_learn_sigma_object_predictions,
BLR_fixed_regularization_learn_sigma_object_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression with Learned $\sigma$ and Fixed $\lambda$'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_fixed_prior_noise_parameter_object_predictions,
BLR_fixed_prior_noise_parameter_object_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression with Fixed $\sigma_p$ and $\sigma$'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLR_object_predictions, BLR_object_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression with Fixed $\lambda=\sigma^2/\sigma_p^2$'
)
plot_prediction_uncertainty_and_data(
X, objective, X_train, Y_train, BLALR_object_predictions, BLALR_object_sigmas, plot_range, plot_domain,
'Bayesian Linear Regression with Fixed $\lambda=\sigma^2/\sigma_p^2$ (alt.)'
)
######
# Eigendecomposition
######
if render_plots:
K = GP_kernel(basis_centers)
eigenvalues, eigenvectors = np.linalg.eig(K)
comparison = GP_kernel(basis_centers)
plt.clf()
plt.figure()
plt.plot(basis_centers, eigenvectors.T[0], 'red', label='eigenbasis[0]')
plt.plot(basis_centers, eigenvectors.T[1], 'green', label='eigenbasis[1]')
plt.plot(basis_centers, eigenvectors.T[2], 'blue', label='eigenbasis[2]')
plt.plot(basis_centers, eigenvectors.T[3], 'orange', label='eigenbasis[3]')
plt.xlim(plot_domain)
plt.legend(loc='lower right')
plt.title('Bayesian Linear Regression Eigenbasis Examples')
plt.savefig(expanduser('~/Downloads/eigenbasis.png'), dpi=300)
plt.show()
basis = BLR_basis(basis_centers)
index_increment = round(len(basis) * 0.1)
plt.clf()
plt.figure()
if fit_intercept:
plt.plot(basis_centers, basis[0, 1:], 'red', label='basis 0%')
plt.plot(basis_centers, basis[index_increment, 1:], 'green', label='basis 10%')
plt.plot(basis_centers, basis[2 * index_increment, 1:], 'blue', label='basis 20%')
plt.plot(basis_centers, basis[3 * index_increment, 1:], 'orange', label='basis 30%')
else:
plt.plot(basis_centers, basis[0], 'red', label='basis 0%')
plt.plot(basis_centers, basis[index_increment], 'green', label='basis 10%')
plt.plot(basis_centers, basis[2 * index_increment], 'blue', label='basis 20%')
plt.plot(basis_centers, basis[3 * index_increment], 'orange', label='basis 30%')
plt.xlim(plot_domain)
plt.legend(loc='upper right')
plt.title('Bayesian Linear Regression Manual Basis Examples')
plt.savefig(expanduser('~/Downloads/manualbasis.png'), dpi=300)
plt.show()
######
# Log-likelihood Comparison
######
K = GP_kernel(X_train)
K_inv = np.linalg.pinv(K)
if fit_intercept:
beta_means_for_log_likelihood = beta_means[1:]
else:
beta_means_for_log_likelihood = beta_means
log_likelihood_first_term_GP = float((Y_train.T - np.average(Y_train)) @ K_inv @ (Y_train - np.average(Y_train)))
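# Hedged sketch of the remaining Gaussian-process log marginal likelihood terms,
# assuming the standard form -0.5 * y^T K^{-1} y - 0.5 * log|K| - 0.5 * n * log(2*pi)
# with y mean-centred as in the quadratic term computed above.
_, logdet_K = np.linalg.slogdet(K)
log_marginal_likelihood_GP_sketch = (
    -0.5 * log_likelihood_first_term_GP
    - 0.5 * logdet_K
    - 0.5 * len(X_train) * np.log(2 * np.pi)
)
print(f'GP log marginal likelihood (sketch): {log_marginal_likelihood_GP_sketch}')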
import numpy as np
from tqdm import tqdm
import time
import random
import sys
import pyfiglet
LENGTH = 3
class Environment:
def __init__(self):
self.board = np.zeros((LENGTH, LENGTH)) # 0 showing an empty board
self.X = -1 # number sign for X player
self.O = 1 # number sign for O player
self.winner = None # variable to keep track of if there is a winner
self.ended = False # variable to track if the game is over or not
self.total_states = 3 ** (LENGTH * LENGTH) # total number of possible states in the game
def is_empty(self, i, j):
return self.board[i, j] == 0
def reward(self, symbol):
if not self.game_over():
return 0
return 1 if self.winner == symbol else 0 # symbol is self.X or self.O
def get_state(self):
"""
Return the hash int for the board state.
The hash encodes the cells as a ternary (base-3) number using digits 0, 1, 2.
"""
k = 0 # Number representing the cell
h = 0 # Hash Number
for i in range(LENGTH):
for j in range(LENGTH):
if self.board[i, j] == 0:
v = 0
elif self.board[i, j] == self.X:
v = 1
elif self.board[i, j] == self.O:
v = 2
h += v*(3 ** k)
k += 1
return h
def game_over(self, force_recalculate=False):
"""
Check for winner:
along row
along column
along diagonals
check if draw
else return false
"""
if not force_recalculate and self.ended:
return self.ended
# checking across rows
for i in range(LENGTH):
for player in (self.X, self.O):
if self.board[i].sum() == player * LENGTH:
self.ended = True
self.winner = player
return True
# checking across columns
for j in range(LENGTH):
for player in (self.X, self.O):
if self.board[:, j].sum() == player * LENGTH:
self.ended = True
self.winner = player
return True
# checking along both diagonals
for player in (self.X, self.O):
# principal diagonal
if self.board.trace() == player * LENGTH:
self.winner = player
self.ended = True
return True
# secondary diagonal
if np.fliplr(self.board).trace() == player * LENGTH:
self.winner = player
self.ended = True
return True
# check for draw condition
if np.all((self.board == 0) == False): # the board is full (no empty cells), so the game is a draw
self.winner = None
self.ended = True
return True
# else return false
self.winner = None # reset for multiple calls of function
return False
def draw_board(self):
"""
Member function to draw the board to play
"""
print("-------------")
for i in range(LENGTH):
print("|", end='')
for j in range(LENGTH):
if self.board[i, j] == self.X:
print(' X |', end='')
elif self.board[i, j] == self.O:
print(' O |', end='')
else:
print(' |', end='')
print("")
print("-------------")
class Agent:
def __init__(self, eps=0.1, alpha=0.5):
self.eps = eps # the threshold to guide explore exploit decision
self.alpha = alpha # learning rate
self.verbose = False # True to show agent's decision making process
self.state_history = [] # To keep track of state history for an episode
def setV(self, V):
"""
To set the value function for the agent
:param V: Value function
"""
self.V = V
def set_symbol(self, symbol):
"""
To give the agent a symbol to play
:param symbol: self.X or self.O
"""
self.symbol = symbol
def set_verbose(self, v):
"""
prints more info if v is True
:param v: True or False for verbosity
"""
self.verbose = v
def reset_history(self):
"""
To reset the history when episode is finished
"""
self.state_history = []
def take_action(self, env):
"""
The agent to take action given the current environment
Action is taken as per epsilon greedy method
:param env: Environment class object
:return:
"""
r = np.random.rand()
best_state = None
if r <= self.eps:
# explore by taking a random action
if self.verbose:
print("Taking a random action...")
possible_moves = []
for i in range(LENGTH):
for j in range(LENGTH):
if env.is_empty(i, j):
possible_moves.append((i, j))
# select a random possible move
id = np.random.choice(len(possible_moves))
next_move = possible_moves[id]
else:
# exploit by selecting the best action
pos2value = {} # To store all the position to value dict for verbose
best_value = -99
next_move = None
for i in range(LENGTH):
for j in range(LENGTH):
if env.is_empty(i, j):
env.board[i, j] = self.symbol
state = env.get_state()
env.board[i, j] = 0 # changing it back
pos2value[(i, j)] = self.V[state]
if self.V[state] > best_value:
best_value = self.V[state]
best_state = state
next_move = (i, j)
if self.verbose:
print("Taking a greedy action")
# printing value of the position wherever empty
print("-------------")
for i in range(LENGTH):
print("|", end='')
for j in range(LENGTH):
if env.board[i, j] == env.X:
print(' X |', end='')
elif env.board[i, j] == env.O:
print(' O |', end='')
else:
num = round(pos2value[(i, j)], 2)
print('.%d|' % (num*1e2), end='')
print("")
print("-------------")
# making the move
env.board[next_move[0], next_move[1]] = self.symbol
def update_state_history(self, state):
"""
Updating state history for a given episode
:param state: state value
"""
self.state_history.append(state)
def update(self, env):
"""
Queries the environment for the latest reward and backs it up through the episode's state history; this is where the value-function learning happens.
"""
reward = env.reward(self.symbol)
target = reward
for prev in reversed(self.state_history):
value = self.V[prev] + self.alpha*(target - self.V[prev])
self.V[prev] = value # This value estimate converges to it's 'actual value' in the episode
target = value
self.reset_history()
class Hooman:
def __init__(self):
pass
def set_symbol(self, symbol):
self.symbol = symbol
def take_action(self, env):
while True:
move = input("Enter the position as i,j you want to place your move (i,j ∈ {0,1,2}): ")
# break if we make a valid move
i, j = move.split(',')
i = int(i.strip())
j = int(j.strip())
if env.is_empty(i,j):
env.board[i,j] = self.symbol
break
else:
print("Invalid move! Try again...")
def update_state_history(self, state):
pass
def update(self, env):
pass
def play_game(p1, p2, env, draw=0):
"""
Main function that is called to play the game
:param p1: player 1 object
:param p2: player 2 object
:param env: Environment object
:param draw: draw the board for which player (1 or 2)
"""
# iterates until the game is over
current_player = None
while not env.game_over():
# alternate chances in between players p1 starts first
if current_player == p1:
current_player = p2
else:
current_player = p1
# draw before the hooman makes a move.
if draw:
if draw == 1 and current_player == p1:
env.draw_board()
if draw == 2 and current_player == p2:
env.draw_board()
# The current player makes a move
current_player.take_action(env)
# updating state histories
state = env.get_state()
p1.update_state_history(state)
p2.update_state_history(state)
# Draws the board at the end of the game
if draw:
env.draw_board()
if draw == 2:
if env.winner == env.X:
print("I won! Better luck next time Noob XD")
else:
print("Congrats! You won!!")
else:
if env.winner == env.O:
print("I won! Better luck next time Noob XD")
else:
print("Congrats! You won!!")
# doing value function updates
p1.update(env)
p2.update(env)
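# Hedged sketch of a minimal training loop for two learning agents. The value
# functions are simply initialized to zeros here; the original project derives
# smarter initial values from the enumerated states (see get_state_hash_winner
# and initialize_vx below).
def train_agents_example(episodes=10000):
    env_proto = Environment()
    p1, p2 = Agent(), Agent()
    p1.setV(np.zeros(env_proto.total_states))
    p2.setV(np.zeros(env_proto.total_states))
    p1.set_symbol(env_proto.X)
    p2.set_symbol(env_proto.O)
    for _ in tqdm(range(episodes)):
        play_game(p1, p2, Environment())
    return p1, p2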
def get_state_hash_winner(env, i=0, j=0):
"""
Returns a list of (state_hash, ended, winner) tuples, one for every possible board configuration.
"""
results = []
for v in (0, env.X, env.O):
env.board[i, j] = v
if j == LENGTH-1:
if i == LENGTH-1:
state = env.get_state()
ended = env.game_over(force_recalculate=True)
winner = env.winner
results.append((state, ended, winner))
else:
results += get_state_hash_winner(env, i+1, 0) # next row first column
else:
results += get_state_hash_winner(env, i, j+1) # next column same row
return results
def initialize_vx(env, state_winner_tuples):
V = np.zeros(env.total_states)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
elapsed = (i * 3) / 60
print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
The matching file list can then be passed to M2M_Data to load the variables.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: list of NetCDF files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
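# Hedged usage sketch for the M2M helpers above: request one telemetered stream,
# list the resulting NetCDF files, and pull two variables. The date range and
# the file-name regex are illustrative assumptions.
def m2m_example():
    data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
                    '2019-01-01T00:00:00.000Z', '2019-01-07T00:00:00.000Z')
    nc_files = M2M_Files(data, '.*METBK.*\\.nc$')
    variables = structtype()
    variables[0].name = 'time'
    variables[1].name = 'sea_surface_temperature'
    return M2M_Data(nc_files, variables)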
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
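# FLORT: three-wavelength fluorometer. Each flort_sample stream below returns
# chlorophyll-a, CDOM, the scattering/backscatter coefficients, and, for the
# CE09OSPM profiler, the co-located CTD pressure.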
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
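# FDCHP: Direct Covariance Flux Package on the CE02SHSM buoy. Only the time
# coordinate is requested from the telemetered stream here.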
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
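# DOSTA: dissolved oxygen optodes on the NSIF and MFN frames. Note that the
# CE09OSPM profiler entry maps to the DOFST (fast-response oxygen) stream, which
# also reports the raw sensor frequency and CTD pressure.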
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
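# ADCP: Acoustic Doppler Current Profilers (NSIF and MFN). Each
# adcp_velocity_earth stream returns bin depths, heading/pitch/roll, and
# east/north/up seawater velocities in earth coordinates.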
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
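# ZPLSC: bio-acoustic sonar on the MFN frames. Only the time coordinate is
# requested for both the telemetered and recovered_host zplsc_c_instrument streams.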
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
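# WAVSS: surface wave spectra (bulk statistics). Each wavss_a_dcl_statistics
# stream returns wave heights, periods, mean direction, and directional spread.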
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
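# VELPT: single-point velocity meters on the buoys and NSIFs. Each stream returns
# east/north/up velocities plus heading, pitch, roll, temperature, and pressure in
# the instrument's raw scaled units (deci-degrees, 0.01 degC, 0.001 dbar).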
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
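# PCO2W: seawater pCO2 sensors. Each stream returns the thermistor temperature
# and the partial pressure of CO2 in seawater (uatm).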
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
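# PHSEN: seawater pH sensors. Each stream returns the thermistor temperature and
# the (unitless) seawater pH.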
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
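# Spectral downwelling irradiance on the NSIF: spkir_abj_cspp_downwelling_vector in uW cm-2 nm-1.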
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
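# Seafloor pressure recorders on the MFN: absolute seafloor pressure (dbar) and seawater temperature (degC).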
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
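# Pumped CTDs on BUOY, NSIF, and MFN nodes: temperature, practical salinity, density, pressure, and conductivity.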
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
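# Point velocity meters on the MFN: east/north/up turbulent velocities (m/s) and seawater pressure (0.001 dbar).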
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
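# Wire-following-profiler velocity meter on CE09OSPM: velocity components, heading/pitch/roll, and co-located CTD pressure.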
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
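# Air-sea pCO2 on surface buoys: seawater and atmospheric partial pressure of CO2 (uatm) and the derived CO2 flux (mol m-2 s-1).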
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
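# Photosynthetically active radiation on the CE09OSPM profiler (umol photons m-2 s-1) with co-located CTD pressure.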
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
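# Optical absorption/attenuation meters: only the time coordinate is populated for these streams.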
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
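# SUNA nitrate sensors on the NSIF: raw and salinity-corrected nitrate concentrations (umol/L).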
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
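# Buoy motion/acceleration packages (recovered-host): only the time coordinate is populated.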
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
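# Bulk meteorology packages on surface buoys (recovered-host): sea surface temperature/conductivity/salinity, wind components,
# barometric pressure, air temperature, humidity, radiation, precipitation, minute flux products, surface currents, and specific humidity.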
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
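# Fluorometer/scattering triplets (recovered-host): scattering coefficient, chlorophyll-a, CDOM, volume scattering, and optical backscatter.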
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
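# Direct covariance flux package on the CE02SHSM buoy (recovered-host): only the time coordinate is populated.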
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
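# Dissolved oxygen optodes (recovered-host): oxygen concentrations and optode temperature on the NSIF; oxygen only on the MFN.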
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
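# Acoustic Doppler current profilers (recovered-host): bin depths, heading/pitch/roll, and earth-referenced velocity components.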
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
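# Surface wave statistics (WAVSS_Stats): bulk wave parameters (significant and
# maximum wave height, mean/peak periods, mean direction and spread) from the
# wavss_a_dcl_statistics_recovered streams on the surface buoys.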
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
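# Point velocity meter (VELPT): east/north/up velocities plus heading/pitch/roll,
# temperature and pressure; attitude, temperature and pressure are reported in
# raw instrument units (deci-degrees, 0.01 degC, 0.001 dbar) by the velpt_ab streams.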
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
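    # Note: this CE06ISSM BUOY branch points at the RID16 (NSIF) velpt stream;
    # the dcl variant of the stream name is retained above as a comment.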
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
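# Seawater pCO2 (PCO2W): thermistor temperature (degC) and pco2_seawater (uatm)
# from the pco2w_abc_dcl_instrument_recovered streams on the NSIF and MFN nodes.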
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
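# Seawater pH (PHSEN): thermistor temperature (degC) and the unitless
# phsen_abcdef_ph_seawater value from the phsen_abcdef_dcl_instrument_recovered streams.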
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
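# Spectral irradiance (SPKIR): downwelling irradiance vector
# (spkir_abj_cspp_downwelling_vector, uW cm-2 nm-1) from the
# spkir_abj_dcl_instrument_recovered streams on the NSIF nodes.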
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
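# Seafloor pressure (PRESF): absolute seafloor pressure (dbar) and seawater
# temperature (degC) from the presf_abc_dcl_tide_measurement_recovered streams
# on the MFN nodes.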
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
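# Pumped CTD (CTDBP): temperature, practical salinity, density, pressure and
# conductivity from the ctdbp_cdef_dcl_instrument_recovered streams at the
# BUOY, NSIF and MFN nodes.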
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
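# 3-D point velocity meter (VEL3D): turbulent east/north/up velocities plus
# seawater pressure (0.001 dbar) from the vel3d_cd_dcl_velocity_data_recovered
# streams on the MFN nodes.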
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
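# Air-sea pCO2 (PCO2A): partial pressure of CO2 in surface seawater and in the
# atmosphere (uatm) plus the derived air-sea CO2 flux (mol m-2 s-1) from the
# buoy-mounted pco2a_a_dcl_instrument_water_recovered streams.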
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
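# Optical absorption/attenuation (OPTAA): only the time coordinate is populated
# in var_list for these optaa_dj_dcl_instrument_recovered streams.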
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate concentration (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
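#CTD - conductivity, temperature, and depth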
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
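#ADCP - acoustic Doppler current profiler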
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
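#ZPLSC - bio-acoustic sonar (echogram data)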
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
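#VELPT - single-point velocity meter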
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
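#VEL3D - 3-D single-point velocity meter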
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
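#PRESF - seafloor pressure (tide) recorder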
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
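#PHSEN - seawater pH sensor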
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
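#PCO2W - pCO2 in seawater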
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
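#PARAD - photosynthetically available radiation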
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
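#NUTNR - nitrate concentration (recovered instrument)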
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
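#FDCHP - direct covariance flux package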
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
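#FLORT - fluorometer (chlorophyll-a, CDOM, optical backscatter)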
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
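#Wire-following profiler (CE09OSPM WFP01) recovered_wfp Data below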
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
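#DOSTA recovered_inst Data below - oxygen optode sampled through the co-located CTDBP on the NSIF and MFN nodes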
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
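#WAVSS_Stats Data below - wave statistics taken from the MFN ADCPT log9 recovered_inst stream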
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
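#Cabled Benthic Experiment Package (BEP) streamed Data below - CE02SHBP and CE04OSBP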
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
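#Cabled DOSTA below - dissolved oxygen is served from the same ctdbp_no_sample stream as the CTD branches above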
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
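#Cabled OPTAA below - these streamed branches keep only the time coordinate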
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface-Piercing Profiler) Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
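#CSPP NUTNR (nitrate) below - only recovered_cspp branches are defined in this block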
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
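#CSPP OPTAA below - only time and the interpolated CTD pressure are kept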
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
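#Shelf surface-piercing profilers (CE02SHSP and CE07SHSP) below - recovered_cspp datasets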
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
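#Glider (CE05MOAS mobile assets) Data below - ctdgv_m_glider_instrument streams, telemetered and recovered_host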
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
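# --- CE05MOAS glider DOSTA (dissolved-oxygen) streams ---
# The branches below follow the same pattern as the glider CTD branches above:
# set the uframe dataset path, name the oxygen / pressure / position variables,
# initialize empty data arrays, and record the expected units (umol/L, umol/kg, dbar).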
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
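# --- CE05MOAS glider FLORT (fluorometer / optical backscatter) streams ---
# Variables in the flort_m_sample stream cover chlorophyll (ug/L), CDOM (ppb),
# volume scattering and backscatter (m-1 sr-1, m-1), plus interpolated CTD
# pressure and glider position; arrays start empty and are filled on request.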
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
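# PARAD branches for the CE05MOAS glider fleet (a descriptive marker comment, inferred from the
# dataset names below): each branch carries parad_m_par in umol photons m-2 s-1 plus pressure,
# lat, and lon, with separate telemetered and recovered_host streams per glider.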
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
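# ADCP branches for the CE05MOAS gliders (recovered_host streams only in this block): bin depths,
# heading/pitch/roll, and eastward/northward/upward seawater velocities in m/s, plus pressure,
# lat, and lon. Comment added for navigation; the assignments themselves are unchanged.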
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
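# METBK1-hr branches: hourly bulk-meteorology flux products (met_*flx terms in W/m2, rain rate,
# specific humidity, air/skin temperature, 10 m wind) from the surface-mooring METBKA instruments,
# keyed on met_timeflx as the time variable. Marker comment only, based on the variable names below.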
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
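# WAVSS_MeanDir branches: mean directional wave statistics (mean direction, band count, initial
# frequency and spacing, psd_mean_directional, and corrected direction arrays) from the
# surface-mooring WAVSSA instruments. Marker comment inferred from the stream and variable names.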
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
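#WAVSS Non-Directional Spectra Streams
#(each branch below sets the UFrame stream path plus parallel name/data/units entries in var_list)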
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
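#WAVSS Buoy Motion Streams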
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
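#WAVSS Directional Fourier Coefficient Streams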
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
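#Coastal Endurance Cabled Profiler Data Streams (CE04OSPS shallow profiler, CE04OSPD deep profiler)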
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
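#CE04OSPS 200m Platform (PC01B) Data Streams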
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
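#Dissolved oxygen at the 200m platform is served from the combined CTD-optode stream (4A-CTDPFA109), not a separate DOSTA instrument stream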
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
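# WAVSS surface-wave branches for CP01CNSM: bulk statistics (WAVSS_Stats),
# mean directional and non-directional spectra, buoy motion, and Fourier
# coefficients, each with telemetered and recovered_host streams.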
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
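# The directional and non-directional spectra branches below return
# per-frequency-band products (PSD, direction and spread arrays) rather than
# the scalar wave statistics above, so several variables are array-valued.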
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
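# PCO2A branches: surface-water and atmospheric pCO2 plus the derived air-sea
# CO2 flux (pco2_co2flux); telemetered streams first, recovered_host below.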
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A - RecoveredHost
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
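# FDCHP (direct-covariance flux) branches: only the time coordinate is
# requested here; no science variables are enumerated for this instrument.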
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
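# metbk_hourly branches: hourly derived bulk-flux products (rain rate, buoy,
# fresh-water, heat, latent, momentum, net longwave, rain and sensible fluxes,
# 2 m humidity/temperature, skin temperature, 10 m wind and hourly net
# shortwave), for both METBK packages on CP01CNSM and the single package on
# CP03ISSM/CP04OSSM.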
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
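# CTDBP branches for the NSIF and MFN nodes of CP01CNSM/CP03ISSM/CP04OSSM.
# Telemetered and recovered_host streams use the DCL parameter names
# (temp, pressure, conductivity); recovered_inst streams use the instrument's
# ctdbp_seawater_* names. Hypothetical selection, for illustration only:
#   platform_name='CP01CNSM', node='NSIF', instrument_class='CTD',
#   method='Telemetered'  ->  uframe_dataset_name =
#   'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'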
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
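# OPTAA (spectrophotometer) branches: only the time coordinate is requested;
# the spectral absorption/attenuation channels are not enumerated here.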
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
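# VELPT (single-point velocity meter) branches: velocities in m/s, with
# attitude and pressure reported in raw instrument units (deci-degrees,
# 0.01 degC, 0.001 dbar).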
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
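    # FLORT (three-wavelength fluorometer) on the NSIF: chlorophyll-a, CDOM, optical backscatter,
    # and the seawater/volume scattering coefficients.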
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
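    # SPKIR (multispectral downwelling irradiance) on the NSIF: a single downwelling irradiance
    # vector in uW cm-2 nm-1 for both the recovered_host and telemetered streams.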
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
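    # DOSTA (dissolved oxygen optode) on the NSIF: processed oxygen concentration (umol/kg) plus the
    # optode's estimated concentration, temperature, and temperature-compensated oxygen.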
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
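    # PHSEN (seawater pH sensor) on the NSIF: thermistor temperature and pH for all three delivery methods.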
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
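    # PHSEN on the seafloor multi-function node (MFN): same thermistor temperature and pH variables.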
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
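    # PCO2W (seawater pCO2 sensor) on the MFN: thermistor temperature (degC) and seawater pCO2 (uatm).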
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
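    # PRESF (seafloor pressure) on the MFN: tide-resolution pressure (dbar) and temperature. The DCL
    # streams name these abs_seafloor_pressure/seawater_temperature, while recovered_inst uses presf_tide_*.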
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
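    # VELPT on the MFN: same velocity/attitude/temperature/pressure variables as the NSIF VELPT entries above.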
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
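    # DOSTA on the MFN: same oxygen variables as the NSIF DOSTA entries, served from the MFD37 port.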
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
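    # ZPLSC (bio-acoustic sonar) on the MFN: only the time coordinate is parsed for these streams.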
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
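#Coastal Pioneer MFN ADCP earth-coordinate velocity streams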
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
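#CP04OSPM WFP mooring streams (MOPAK, FLORT, DOSTA, VEL3D, CTD, PARAD)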
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
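#CP01CNPM WFP mooring streams (MOPAK, FLORT, DOSTA, VEL3D, CTD, PARAD)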
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
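#CP02PMCI WFP mooring streams (MOPAK, FLORT, DOSTA, VEL3D, CTD, PARAD)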
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
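#CP02PMCO WFP mooring streams (MOPAK, FLORT, DOSTA, VEL3D, CTD, PARAD)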
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
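#CP02PMUI WFP mooring streams (MOPAK, FLORT, DOSTA, VEL3D, CTD, PARAD)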
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
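#CP02PMUO WFP mooring streams (MOPAK, FLORT, DOSTA, VEL3D, CTD, PARAD)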
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
import random
import pickle
from datetime import datetime
import os
import torch
import numpy as np
import imageio
import cv2
from torch.utils.data import Dataset
import torchvision.transforms as transforms
normalize_transform = transforms.Compose(
[transforms.ConvertImageDtype(torch.float)
])
def random_points_from_grid(seed=None, num_points=9, central_area_side=30, total_area_side=50):
'''
"Choose 9 points randomly from the central 30x30 area of a 50x50 grid"
These become the initial points of the dot-distortion shapes.
'''
if seed is not None:
random.seed(seed)
points = []
for i in range(num_points):
x = random.randint(total_area_side // 2 - central_area_side // 2, total_area_side // 2 + central_area_side // 2)  # integer division: random.randint expects integer bounds
y = random.randint(total_area_side // 2 - central_area_side // 2, total_area_side // 2 + central_area_side // 2)
points.append((x, y))
return points
def create_relative_coordinates(dot_block_image_path='dot_distortion_areas.png'):
'''
I created a 21x21 pixel PNG image using GIMP that has different brightness values for each kind of 'area'.
This function parses that image to return a list of tuples of relative coordinates for each area.
Areas are 1,2,3,4,5.
'''
im = imageio.imread(dot_block_image_path)
pixel_brightness = sorted(set(im[10]))
brightness_to_areas = dict()
for i, num in enumerate(pixel_brightness):
brightness_to_areas.update({num: i+1})
areas_array = np.zeros(im.shape)
for i in range(len(areas_array)):
for j in range(len(areas_array[i])):
areas_array[i][j] = brightness_to_areas[im[i][j]]
assert set(areas_array.flatten()) == {1, 2, 3, 4, 5} # ensure that all pixel values have been converted to area indexes
coords_per_area = []
for k in range(1,len(set(areas_array.flatten()))+1):
coords_per_area.append([(i,j) for i in range(len(areas_array)) for j in range(len(areas_array[i])) if areas_array[i][j] == k])
# subtract (10,10) from all coords; turn into relative values from center pixel
rel_coords_per_area = coords_per_area
for i in range(len(coords_per_area)):
rel_coords_per_area[i] = [(x-10,y-10) for (x,y) in coords_per_area[i]]
return rel_coords_per_area
relative_shifts = create_relative_coordinates()
def distort_dot(coords, distortion_level, relative_shifts, area_names=[1,2,3,4,5]):
'''
Randomly move dot to corresponding area_name according to a probability distribution for area_names given by distortion_level
coords (tuple(int,int)): (x, y) point coordinates
distortion_level (str): choose from '1', '2', '3', '4', '5', '6', '7.7'
relative_shifts (list[list[tuple(int,int)]]): list of lists of relative coordinates for each area_name
area_names (list[int])
'''
level_to_probs = {
# level names correspond to bits per dot
# value is the probability distribution over the 5 area_names
'1' : (0.88, 0.10, 0.015, 0.004, 0.001),
'2' : (0.75, 0.15, 0.05, 0.03, 0.02),
'3' : (0.59, 0.20, 0.16, 0.03, 0.02),
'4' : (0.36, 0.48, 0.06, 0.05, 0.05),
'5' : (0.20, 0.30, 0.40, 0.05, 0.05),
'6' : (0, 0.40, 0.32, 0.15, 0.13),
'7.7': (0, 0.24, 0.16, 0.30, 0.30)
}
# check that all probability distributions sum to 1
for x in ['1','2','3','4','5','6','7.7']:
assert abs(sum(level_to_probs[x]) - 1.0) < 1e-9  # tolerate floating-point rounding in the hand-written distributions
probs = level_to_probs[distortion_level]
area_selection = np.random.choice(area_names, p=probs)
pixel_shift_selection = random.choice(relative_shifts[area_selection-1])
return (coords[0] + pixel_shift_selection[0], coords[1] + pixel_shift_selection[1])
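# Minimal usage sketch (not part of the original pipeline; the seed and level are arbitrary
# choices for illustration): build one 9-dot prototype and a level-'3' distortion of it,
# reusing the module-level relative_shifts computed above.
def _distort_dot_example():
    prototype = random_points_from_grid(seed=0)
    distorted = [distort_dot(p, '3', relative_shifts) for p in prototype]
    return prototype, distorted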
def scale_coords(coords, scale_factor=3):
'''
Scale coordinates by a factor
'''
return (coords[0]*scale_factor, coords[1]*scale_factor)
def generate_single_dot_distortion(seed=None, distortion_level='3', relative_shifts=relative_shifts, scale_factor=3, draw_bbox=False):
'''
seed: if not None, use seed to create the same category
'''
img = np.zeros([150, 150, 1],np.uint8)
shifted_points = []
for p in random_points_from_grid(seed=seed):
shifted_points.append(distort_dot(coords=p, distortion_level=distortion_level, relative_shifts=relative_shifts))
scaled_points = [scale_coords(c, scale_factor=scale_factor) for c in shifted_points]
x1, y1 = np.amin(np.array(scaled_points), axis=0)
x2, y2 = np.amax(np.array(scaled_points), axis=0)
bbox = (x1, y1, x2, y2)
img = np.zeros([150, 150, 1], np.uint8)
import glob, os, shutil, sys, json
from pathlib import Path
import pylab as plt
import trimesh
import open3d
from easydict import EasyDict
import numpy as np
from tqdm import tqdm
import utils
from features import MeshFPFH
FIX_BAD_ANNOTATION_HUMAN_15 = 0
# Labels for all datasets
# -----------------------
sigg17_part_labels = ['---', 'head', 'hand', 'lower-arm', 'upper-arm', 'body', 'upper-leg', 'lower-leg', 'foot']
sigg17_shape2label = {v: k for k, v in enumerate(sigg17_part_labels)}
model_net_labels = [
'bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor', 'night_stand', 'sofa', 'table', 'toilet',
'wardrobe', 'bookshelf', 'laptop', 'door', 'lamp', 'person', 'curtain', 'piano', 'airplane', 'cup',
'cone', 'tent', 'radio', 'stool', 'range_hood', 'car', 'sink', 'guitar', 'tv_stand', 'stairs',
'mantel', 'bench', 'plant', 'bottle', 'bowl', 'flower_pot', 'keyboard', 'vase', 'xbox', 'glass_box'
]
model_net_shape2label = {v: k for k, v in enumerate(model_net_labels)}
model_net_weights = [265, 1186, 2300, 407, 404, 956, 381, 1645, 755, 919, 145, 1002, 260, 204, 303, 248, 330, 617, 1874,
159, 213, 295, 267, 189, 303, 587, 332, 447, 483, 275, 817, 354, 623, 868, 119, 385, 412, 1216,
278, 183]
future3d_labels = ['Children Cabinet', 'Nightstand', 'Bookcase / jewelry Armoire','Wardrobe', 'Coffee Table', 'Corner/Side Table',
'Sideboard / Side Cabinet / Console Table','Wine Cabinet', 'TV Stand', 'Drawer Chest / Corner cabinet',
'Shelf', 'Round End Table', 'King-size Bed', 'Bunk Bed', 'Bed Frame', 'Single bed', 'Kids Bed', 'Dining Chair',
'Lounge Chair / Cafe Chair / Office Chair', 'Dressing Chair', 'Classic Chinese Chair','Barstool',
'Dressing Table', 'Dining Table', 'Desk', 'Three-Seat / Multi-seat Sofa', 'armchair', 'Loveseat Sofa',
'L-shaped Sofa', 'Lazy Sofa', 'Chaise Longue Sofa', 'Footstool / Sofastool / Bed End Stool / Stool',
'Pendant Lamp', 'Ceiling Lamp']
future3d_excluded_labels = ['Dressing Chair', 'Chaise Longue Sofa']
future3d_labels = [x.lower() for x in future3d_labels]
future3d_super_labels = [x.lower() for x in ['Cabinet/Shelf/Desk', 'Bed', 'Chair', 'Table', 'Sofa', 'Pier/Stool', 'Lighting']]
future_3d_labels_to_super = [12, 5, 5, 3, 6, 1, 2]
future3d_shape2label = {v: k for k, v in enumerate(future3d_labels)}
future3d_weights = [259, 1045, 262, 724, 1644, 1171, 1046, 169, 581, 643, 187, 168, 1260, 140, 395, 482, 139, 1139,
1411, 24, 32, 365, 291, 736, 198, 2479, 1741, 1169, 385, 204, 4, 885, 1915, 611]
cubes_labels = [
'apple', 'bat', 'bell', 'brick', 'camel',
'car', 'carriage', 'chopper', 'elephant', 'fork',
'guitar', 'hammer', 'heart', 'horseshoe', 'key',
'lmfish', 'octopus', 'shoe', 'spoon', 'tree',
'turtle', 'watch'
]
cubes_shape2label = {v: k for k, v in enumerate(cubes_labels)}
shrec11_labels = [
'armadillo', 'man', 'centaur', 'dinosaur', 'dog2',
'ants', 'rabbit', 'dog1', 'snake', 'bird2',
'shark', 'dino_ske', 'laptop', 'santa', 'flamingo',
'horse', 'hand', 'lamp', 'two_balls', 'gorilla',
'alien', 'octopus', 'cat', 'woman', 'spiders',
'camel', 'pliers', 'myScissor', 'glasses', 'bird1'
]
shrec11_shape2label = {v: k for k, v in enumerate(shrec11_labels)}
coseg_labels = [
'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c',
]
coseg_shape2label = {v: k for k, v in enumerate(coseg_labels)}
# ShapenetCore-55
shapenet_synsetIds = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684',
'02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340',
'02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776',
'03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806',
'03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244',
'03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520',
'04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
shapenet_synset_to_label = {'02691156': 'airplane,aeroplane,plane',
'02747177': 'ashcan,trash can,garbage can,wastebin,ash bin,ash-bin,ashbin,dustbin,trash barrel,trash bin',
'02773838': 'bag,traveling bag,travelling bag,grip,suitcase',
'02801938': 'basket,handbasket',
'02808440': 'bathtub,bathing tub,bath,tub',
'02818832': 'bed',
'02828884': 'bench',
'02843684': 'birdhouse',
'02871439': 'bookshelf',
'02876657': 'bottle',
'02880940': 'bowl',
'02924116': 'bus,autobus,coach,charabanc,double-decker,jitney,motorbus,motorcoach,omnibus,passenger vehi',
'02933112': 'cabinet',
'02942699': 'camera,photographic camera',
'02946921': 'can,tin,tin can',
'02954340': 'cap',
'02958343': 'car,auto,automobile,machine,motorcar',
'03001627': 'chair',
'03046257': 'clock',
'03085013': 'computer keyboard,keypad',
'03207941': 'dishwasher,dish washer,dishwashing machine',
'03211117': 'display,video display',
'03261776': 'earphone,earpiece,headphone,phone',
'03325088': 'faucet,spigot',
'03337140': 'file,file cabinet,filing cabinet',
'03467517': 'guitar',
'03513137': 'helmet',
'03593526': 'jar',
'03624134': 'knife',
'03636649': 'lamp',
'03642806': 'laptop,laptop computer',
'03691459': 'loudspeaker,speaker,speaker unit,loudspeaker system,speaker system',
'03710193': 'mailbox,letter box',
'03759954': 'microphone,mike',
'03761084': 'microwave,microwave oven',
'03790512': 'motorcycle,bike',
'03797390': 'mug',
'03928116': 'piano,pianoforte,forte-piano',
'03938244': 'pillow',
'03948459': 'pistol,handgun,side arm,shooting iron',
'03991062': 'pot,flowerpot',
'04004475': 'printer,printing machine',
'04074963': 'remote control,remote',
'04090263': 'rifle',
'04099429': 'rocket,projectile',
'04225987': 'skateboard',
'04256520': 'sofa,couch,lounge',
'04330267': 'stove',
'04379243': 'table',
'04401088': 'telephone,phone,telephone set',
'02992529': 'cellular telephone,cellular phone,cellphone,cell,mobile phone',
'04460130': 'tower',
'04468005': 'train,railroad train',
'04530566': 'vessel,watercraft',
'04554684': 'washer,automatic washer,washing machine'}
shapenet_labels = [shapenet_synset_to_label[x] for x in shapenet_synsetIds]
shapenet_shapeid2label = {v: k for k, v in enumerate(shapenet_synsetIds)}
def rotate_vertices(vertices, angle):
y = angle * np.pi / 180
R = np.array(((np.cos(y),-np.sin(y), 0),
(np.sin(y), np.cos(y), 0),
(0 , 0, 1)),
dtype=vertices.dtype)
np.dot(vertices, R, out=vertices)
def calc_mesh_area(mesh):
t_mesh = trimesh.Trimesh(vertices=mesh['vertices'], faces=mesh['faces'], process=False)
mesh['area_faces'] = t_mesh.area_faces
mesh['area_vertices'] = np.zeros((mesh['vertices'].shape[0]))
for f_index, f in enumerate(mesh['faces']):
for v in f:
mesh['area_vertices'][v] += mesh['area_faces'][f_index] / f.size
def calc_vertex_labels_from_face_labels(mesh, face_labels):
vertices = mesh['vertices']
faces = mesh['faces']
all_vetrex_labels = [[] for _ in range(vertices.shape[0])]
vertex_labels = -np.ones((vertices.shape[0],), dtype=int)  # np.int was removed from recent NumPy releases
n_classes = int(np.max(face_labels))
assert np.min(face_labels) == 1 # min label is 1, for compatibility to human_seg labels representation
v_labels_fuzzy = -np.ones((vertices.shape[0], n_classes))
for i in range(faces.shape[0]):
label = face_labels[i]
for f in faces[i]:
all_vetrex_labels[f].append(label)
for i in range(vertices.shape[0]):
counts = np.bincount(all_vetrex_labels[i])
vertex_labels[i] = np.argmax(counts)
v_labels_fuzzy[i] = np.zeros((1, n_classes))
for j in all_vetrex_labels[i]:
v_labels_fuzzy[i, int(j) - 1] += 1 / len(all_vetrex_labels[i])
return vertex_labels, v_labels_fuzzy
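# Minimal sketch on a toy mesh (illustrative only): two triangles sharing an edge with face
# labels 1 and 2. Each vertex receives the majority label of its incident faces, and the fuzzy
# matrix holds the per-class fractions (rows sum to 1).
def _vertex_labels_example():
    toy_mesh = {'vertices': np.zeros((4, 3)), 'faces': np.array([[0, 1, 2], [1, 2, 3]])}
    face_labels = np.array([1, 2])
    vertex_labels, fuzzy = calc_vertex_labels_from_face_labels(toy_mesh, face_labels)
    return vertex_labels, fuzzy  # vertex_labels == [1, 1, 1, 2] for this toy mesh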
def prepare_edges_and_kdtree(mesh):
vertices = mesh['vertices']
faces = mesh['faces']
mesh['edges'] = [set() for _ in range(vertices.shape[0])]
for i in range(faces.shape[0]):
for v in faces[i]:
mesh['edges'][v] |= set(faces[i])
for i in range(vertices.shape[0]):
if i in mesh['edges'][i]:
mesh['edges'][i].remove(i)
mesh['edges'][i] = list(mesh['edges'][i])
max_vertex_degree = np.max([len(e) for e in mesh['edges']])
for i in range(vertices.shape[0]):
if len(mesh['edges'][i]) < max_vertex_degree:
mesh['edges'][i] += [-1] * (max_vertex_degree - len(mesh['edges'][i]))
mesh['edges'] = np.array(mesh['edges'], dtype=np.int32)
mesh['kdtree_query'] = []
t_mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
n_nbrs = min(10, vertices.shape[0] - 2)
for n in range(vertices.shape[0]):
d, i_nbrs = t_mesh.kdtree.query(vertices[n], n_nbrs)
i_nbrs_cleared = [inbr for inbr in i_nbrs if inbr != n and inbr < vertices.shape[0]]
if len(i_nbrs_cleared) > n_nbrs - 1:
i_nbrs_cleared = i_nbrs_cleared[:n_nbrs - 1]
mesh['kdtree_query'].append(np.array(i_nbrs_cleared, dtype=np.int32))
mesh['kdtree_query'] = np.array(mesh['kdtree_query'])
assert mesh['kdtree_query'].shape[1] == (n_nbrs - 1), 'Number of kdtree_query is wrong: ' + str(mesh['kdtree_query'].shape[1])
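# Minimal sketch on a toy tetrahedron (illustrative only): prepare_edges_and_kdtree fills the
# dict in place -- 'edges' is a fixed-width int array padded with -1, and 'kdtree_query' lists
# nearest-neighbour vertex indices excluding the vertex itself.
def _edges_kdtree_example():
    toy_mesh = {'vertices': np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
                'faces': np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])}
    prepare_edges_and_kdtree(toy_mesh)
    return toy_mesh['edges'].shape, toy_mesh['kdtree_query'].shape  # (4, 3) and (4, 1) here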
def prepare_face_edges(mesh):
tmesh = trimesh.Trimesh(mesh['vertices'], mesh['faces'])
mesh['faces_edges'] = tmesh.face_adjacency
mesh['faces_edges_angles'] = tmesh.face_adjacency_angles
def add_fields_and_dump_model(mesh_data, fileds_needed, out_fn, dataset_name, dump_model=True):
m = {}
for k, v in mesh_data.items():
if k in fileds_needed:
m[k] = v
for field in fileds_needed:
if field not in m.keys():
if field == 'labels':
m[field] = np.zeros((0,))
if field == 'dataset_name':
m[field] = dataset_name
if field == 'walk_cache':
m[field] = np.zeros((0,))
if field == 'kdtree_query' or field == 'edges':
prepare_edges_and_kdtree(m)
if field == 'tri_centers':
t_mesh = trimesh.Trimesh(vertices=mesh_data.vertices, faces=mesh_data.faces, process=False)
m[field] = t_mesh.triangles_center
if field == 'tri_edges':
prepare_face_edges(m)
if field == 'vertex_normals':
t_mesh = trimesh.Trimesh(vertices=mesh_data.vertices, faces=mesh_data.faces, process=False)
m[field] = t_mesh.vertex_normals
if field == 'mfpfh':
fph = MeshFPFH(EasyDict(m), 2)
m[field] = fph.calc_fpfh()
if dump_model:
np.savez(out_fn, **m)
return m
def get_sig17_seg_bm_labels(mesh, file, seg_path):
# Finding the best match file name .. :
in_to_check = file.replace('obj', 'txt')
in_to_check = in_to_check.replace('off', 'txt')
in_to_check = in_to_check.replace('_fix_orientation', '')
if in_to_check.find('MIT_animation') != -1 and in_to_check.split('/')[-1].startswith('mesh_'):
in_to_check = '/'.join(in_to_check.split('/')[:-2])
in_to_check = in_to_check.replace('MIT_animation/meshes_', 'mit/mit_')
in_to_check += '.txt'
elif in_to_check.find('/scape/') != -1:
in_to_check = '/'.join(in_to_check.split('/')[:-1])
in_to_check += '/scape.txt'
elif in_to_check.find('/faust/') != -1:
in_to_check = '/'.join(in_to_check.split('/')[:-1])
in_to_check += '/faust.txt'
seg_full_fn = []
for fn in Path(seg_path).rglob('*.txt'):
tmp = str(fn)
tmp = tmp.replace('/segs/', '/meshes/')
tmp = tmp.replace('_full', '')
tmp = tmp.replace('shrec_', '')
tmp = tmp.replace('_corrected', '')
if tmp == in_to_check:
seg_full_fn.append(str(fn))
if len(seg_full_fn) == 1:
seg_full_fn = seg_full_fn[0]
else:
print('\nin_to_check', in_to_check)
print('tmp', tmp)
raise Exception('Could not find a unique segmentation file for: ' + in_to_check)
face_labels = np.loadtxt(seg_full_fn)
if FIX_BAD_ANNOTATION_HUMAN_15 and file.endswith('test/shrec/15.off'):
face_center = []
for f in mesh.faces:
face_center.append(np.mean(mesh.vertices[f, :], axis=0))
face_center = np.array(face_center)
idxs = (face_labels == 6) * (face_center[:, 0] < 0) * (face_center[:, 1] < -0.4)
face_labels[idxs] = 7
np.savetxt(seg_full_fn + '.fixed.txt', face_labels.astype(int))
return face_labels
def get_labels(dataset_name, mesh, file, fn2labels_map=None):
v_labels_fuzzy = np.zeros((0,))
if dataset_name == 'faust':
face_labels = np.load('faust_labels/faust_part_segmentation.npy').astype(int)
vertex_labels, v_labels_fuzzy = calc_vertex_labels_from_face_labels(mesh, face_labels)
model_label = np.zeros((0,))
return model_label, vertex_labels, v_labels_fuzzy
elif dataset_name.startswith('coseg') or dataset_name == 'human_seg_from_meshcnn':
labels_fn = '/'.join(file.split('/')[:-2]) + '/seg/' + file.split('/')[-1].split('.')[-2] + '.eseg'
e_labels = np.loadtxt(labels_fn)
v_labels = [[] for _ in range(mesh['vertices'].shape[0])]
faces = mesh['faces']
fuzzy_labels_fn = '/'.join(file.split('/')[:-2]) + '/sseg/' + file.split('/')[-1].split('.')[-2] + '.seseg'
seseg_labels = np.loadtxt(fuzzy_labels_fn)
v_labels_fuzzy = np.zeros((mesh['vertices'].shape[0], seseg_labels.shape[1]))
edge2key = dict()
edges = []
edges_count = 0
for face_id, face in enumerate(faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
faces_edges[idx] = edge
if edge not in edge2key:
v_labels_fuzzy[edge[0]] += seseg_labels[edges_count]
v_labels_fuzzy[edge[1]] += seseg_labels[edges_count]
edge2key[edge] = edges_count
edges.append(list(edge))
v_labels[edge[0]].append(e_labels[edges_count])
v_labels[edge[1]].append(e_labels[edges_count])
edges_count += 1
assert np.max(np.sum(v_labels_fuzzy != 0, axis=1)) <= 3, 'Number of non-zero labels must not exceed 3!'
vertex_labels = []
for l in v_labels:
l2add = np.argmax(np.bincount(l))
vertex_labels.append(l2add)
vertex_labels = np.array(vertex_labels)
model_label = np.zeros((0,))
return model_label, vertex_labels, v_labels_fuzzy
else:
tmp = file.split('/')[-1]
model_name = '_'.join(tmp.split('_')[:-1])
if dataset_name.lower().startswith('modelnet40_preprocessed'):
model_label = model_net_shape2label['_'.join(model_name.split('_')[:-1])]
elif dataset_name.lower().startswith('modelnet'):
model_label = model_net_shape2label[model_name]
elif dataset_name.lower().startswith('cubes'):
model_label = cubes_shape2label[model_name]
elif dataset_name.lower().startswith('shrec11'):
model_name = file.split('/')[-3]
if fn2labels_map is None:
model_label = shrec11_shape2label[model_name]
else:
file_index = int(file.split('.')[-2].split('T')[-1])
model_label = fn2labels_map[file_index]
else:
raise Exception('Cannot find labels for the dataset')
vertex_labels = np.zeros((0,))
return model_label, vertex_labels, v_labels_fuzzy
def fix_labels_by_dist(vertices, orig_vertices, labels_orig):
labels = -np.ones((vertices.shape[0], ))
for i, vertex in enumerate(vertices):
d = np.linalg.norm(vertex - orig_vertices, axis=1)
orig_idx = np.argmin(d)
labels[i] = labels_orig[orig_idx]
return labels
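# Minimal sketch with toy arrays (illustrative only): after simplification, each new vertex
# simply inherits the label of the nearest original vertex.
def _fix_labels_example():
    orig_vertices = np.array([[0., 0., 0.], [10., 0., 0.]])
    labels_orig = np.array([1, 2])
    new_vertices = np.array([[0.1, 0., 0.], [9.8, 0., 0.]])
    return fix_labels_by_dist(new_vertices, orig_vertices, labels_orig)  # -> [1., 2.]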
def get_faces_belong_to_vertices(vertices, faces):
faces_belong = []
for face in faces:
used = np.any([v in vertices for v in face])
if used:
faces_belong.append(face)
return np.array(faces_belong)
def remesh(mesh_orig, target_n_faces, add_labels=False, labels_orig=None):
labels = labels_orig
if target_n_faces < np.asarray(mesh_orig.triangles).shape[0]:
mesh = mesh_orig.simplify_quadric_decimation(target_n_faces)
str_to_add = '_simplified_to_' + str(target_n_faces)
mesh = mesh.remove_unreferenced_vertices()
if add_labels and labels_orig.size:
labels = fix_labels_by_dist(np.asarray(mesh.vertices), np.asarray(mesh_orig.vertices), labels_orig)
else:
mesh = mesh_orig
str_to_add = '_not_changed_' + str(np.asarray(mesh_orig.triangles).shape[0])
return mesh, labels, str_to_add
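# Usage sketch (illustrative; 'some_model.off' is a hypothetical path, not a file shipped with
# this code): simplify a loaded mesh to roughly 2000 faces. Labels stay None unless
# add_labels / labels_orig are supplied.
def _remesh_example():
    mesh_orig = load_mesh('some_model.off', classification=True)
    mesh, labels, str_to_add = remesh(mesh_orig, target_n_faces=2000)
    # str_to_add is '_simplified_to_2000', or '_not_changed_<n>' if the mesh already had few enough faces
    return mesh, labels, str_to_add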
def load_meshes(model_fns):
f_names = glob.glob(model_fns)
joint_mesh_vertices = []
joint_mesh_faces = []
for fn in f_names:
mesh_ = trimesh.load_mesh(fn)
vertex_offset = len(joint_mesh_vertices)
joint_mesh_vertices += mesh_.vertices.tolist()
faces = mesh_.faces + vertex_offset
joint_mesh_faces += faces.tolist()
mesh = open3d.geometry.TriangleMesh()
mesh.vertices = open3d.utility.Vector3dVector(joint_mesh_vertices)
mesh.triangles = open3d.utility.Vector3iVector(joint_mesh_faces)
return mesh
def load_mesh(model_fn, classification=True):
if 'FUTURE' not in model_fn: # To load and clean up mesh - "remove vertices that share position"
if classification:
mesh_ = trimesh.load_mesh(model_fn, process=True)
if type(mesh_) is trimesh.Scene:
mesh = open3d.io.read_triangle_mesh(model_fn)
return mesh
else:
mesh_.remove_duplicate_faces()
else:
mesh_ = trimesh.load_mesh(model_fn, process=False)
mesh = open3d.geometry.TriangleMesh()
mesh.vertices = open3d.utility.Vector3dVector(mesh_.vertices)
mesh.triangles = open3d.utility.Vector3iVector(mesh_.faces)
else:
if not '26b439df-fba5-3b38-b6c5-bc6a4c1fb0a0' in model_fn: # list of mixed-faces (four sided faces + three sided in same .obj)
mesh = open3d.io.read_triangle_mesh(model_fn)
else:
mesh_ = trimesh.load_mesh(model_fn, process=False)
mesh = open3d.geometry.TriangleMesh()
mesh.vertices = open3d.utility.Vector3dVector(mesh_.vertices)
mesh.triangles = open3d.utility.Vector3iVector(mesh_.faces)
mesh.remove_duplicated_vertices()
mesh.remove_unreferenced_vertices()
mesh.remove_duplicated_triangles()
mesh.remove_degenerate_triangles()
return mesh
def create_tmp_dataset(model_fn, p_out, n_target_faces):
fileds_needed = ['vertices', 'faces', 'edge_features', 'edges_map', 'edges', 'kdtree_query',
'label', 'labels', 'dataset_name']
if not os.path.isdir(p_out):
os.makedirs(p_out)
mesh_orig = load_mesh(model_fn)
mesh, labels, str_to_add = remesh(mesh_orig, n_target_faces)
labels = np.zeros((np.asarray(mesh.vertices).shape[0],), dtype=np.int16)
mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles), 'label': 0, 'labels': labels})
out_fn = p_out + '/tmp'
add_fields_and_dump_model(mesh_data, fileds_needed, out_fn, 'tmp')
def prepare_single_mesh(params):
file, classification, p_out, fn_prefix, n_target_faces, add_labels, label, dataset_name = params
fileds_needed = ['vertices', 'faces', 'edges', 'kdtree_query',
'label', 'labels', 'dataset_name', 'vertex_normals', 'tri_centers', 'tri_edges']
out_fn = p_out + '/' + fn_prefix + os.path.split(file)[1].split('.')[0]
try:
mesh = load_mesh(file, classification=classification)
except:
print('failed loading mesh {}'.format(file)); return  # skip this model; 'mesh' would be undefined below
mesh_orig = mesh
mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles)})
if label is None:
if add_labels:
if type(add_labels) is list:
fn2labels_map = add_labels
else:
fn2labels_map = None
label, labels_orig, v_labels_fuzzy = get_labels(dataset_name, mesh_data, file, fn2labels_map=fn2labels_map)
else:
label = np.zeros((0,))
else:
labels_orig = None
for this_target_n_faces in n_target_faces:
mesh, labels, str_to_add = remesh(mesh_orig, this_target_n_faces, add_labels=add_labels, labels_orig=labels_orig)
# str_to_add = '_simplified_{}'.format(this_target_n_faces)
if 'modelnet40_retrieval' in p_out:
for ang in np.linspace(-180, 180, int(360/30), endpoint=False):
verts = np.asarray(mesh.vertices)
rotate_vertices(verts, ang)
mesh_data = EasyDict(
{'vertices': verts, 'faces': np.asarray(mesh.triangles), 'label': label, 'labels': labels})
# mesh_data['labels_fuzzy'] = v_labels_fuzzy
out_fc_full = out_fn + '_{:03d}'.format(int(ang) + 180) +str_to_add
if os.path.exists(out_fc_full):
continue
try:
m = add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
except:
print('debug {}'.format(out_fc_full))
else:
verts = np.asarray(mesh.vertices)
mesh_data = EasyDict(
{'vertices': verts, 'faces': np.asarray(mesh.triangles), 'label': label, 'labels': labels})
# mesh_data['labels_fuzzy'] = v_labels_fuzzy
out_fc_full = out_fn + str_to_add
if os.path.exists(out_fc_full):
continue
try:
m = add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
except:
print('debug {}'.format(out_fc_full))
def prepare_directory_from_scratch(dataset_name, pathname_expansion=None, p_out=None, n_target_faces=None, add_labels=True,
size_limit=np.inf, fn_prefix='', verbose=True, classification=True, label=None, filenames=None):
if not os.path.isdir(p_out):
os.makedirs(p_out)
if filenames is None:
filenames = glob.glob(pathname_expansion)
filenames.sort()
if not len(filenames):
print('Warning: no files matched ' + str(pathname_expansion))
if len(filenames) > size_limit:
filenames = filenames[:size_limit]
for i, file in enumerate(filenames):
# if i < 7183:
# continue
prepare_single_mesh([file, classification, p_out, fn_prefix, n_target_faces, add_labels, label, dataset_name])
# from multiprocessing import Pool
# pool = Pool(8)
#
# inputs = [[file, classification, p_out, fn_prefix, n_target_faces, add_labels, label, dataset_name] for file in filenames]
# pool.map(prepare_single_mesh, inputs)
# pool.close()
# pool.join()
# for file in tqdm(filenames, disable=1 - verbose):
# out_fn = p_out + '/' + fn_prefix + os.path.split(file)[1].split('.')[0]
# mesh = load_mesh(file, classification=classification)
# mesh_orig = mesh
# mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles)})
# if label is None:
# if add_labels:
# if type(add_labels) is list:
# fn2labels_map = add_labels
# else:
# fn2labels_map = None
# label, labels_orig, v_labels_fuzzy = get_labels(dataset_name, mesh_data, file, fn2labels_map=fn2labels_map)
# else:
# label = np.zeros((0, ))
# else:
# labels_orig = None
# for this_target_n_faces in n_target_faces:
# mesh, labels, str_to_add = remesh(mesh_orig, this_target_n_faces, add_labels=add_labels, labels_orig=labels_orig)
# # str_to_add = '_simplified_{}'.format(this_target_n_faces)
# mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles), 'label': label, 'labels': labels})
# # mesh_data['labels_fuzzy'] = v_labels_fuzzy
# out_fc_full = out_fn + str_to_add
# if os.path.exists(out_fc_full):
# continue
# try:
# m = add_fields_and_dump_model(mesh_data, fileds_needed, out_fc_full, dataset_name)
# except:
# print('debug')
# ------------------------------------------------------- #
def prepare_modelnet40():
n_target_faces = [1000, 2000, 4000]
labels2use = model_net_labels
for i, name in tqdm(enumerate(labels2use)):
for part in ['test', 'train']:
pin = os.path.expanduser('~') + '/Databases/ModelNet40_1k2k4k/' + name + '/' + part + '/'
p_out = os.path.expanduser('~') + '/mesh_walker/datasets/modelnet40_1k2k4k/'
prepare_directory_from_scratch('modelnet40_preprocessed', pathname_expansion=pin + '*.off',
p_out=p_out, add_labels='modelnet', n_target_faces=n_target_faces,
fn_prefix=part + '_', verbose=False)
def prepare_modelnet40_retrieval():
n_target_faces = [1000, 2000, 4000]
labels2use = model_net_labels
for i, name in tqdm(enumerate(labels2use)):
# split train files into 80 for train and 20 for test
fold = os.path.expanduser('~') + '/Databases/ModelNet40/' + name + '/train/'
tst_fold = os.path.expanduser('~') + '/Databases/ModelNet40/' + name + '/test/'
all_train_files = [os.path.join(fold, x) for x in os.listdir(fold) if x.endswith('.off')]
all_test_files = [os.path.join(tst_fold, x) for x in os.listdir(tst_fold) if x.endswith('.off')]
for split_idx in range(2):
if split_idx == 0:
all_train_files.sort()
all_test_files.sort()
else:
all_train_files = np.random.permutation(all_train_files)
all_test_files = np.random.permutation(all_test_files)
cur_files = {'train': all_train_files[:80],
'test': all_test_files[:20]}
for part in ['test', 'train']:
pin = os.path.expanduser('~') + '/Databases/ModelNet40/' + name + '/' + part + '/'
p_out = os.path.expanduser('~') + '/mesh_walker/datasets/modelnet40_retrieval_split_{}'.format(split_idx)
prepare_directory_from_scratch('modelnet',
pathname_expansion=pin + '*.off',
p_out=p_out, add_labels='modelnet', n_target_faces=n_target_faces,
fn_prefix=part + '_', verbose=False, filenames=cur_files[part])
def prepare_modelnet40_80_20():
n_target_faces = [1000, 2000, 4000]
labels2use = model_net_labels
for i, name in tqdm(enumerate(labels2use)):
# split train files into 80 for train and 20 for test
fold = os.path.expanduser('~') + '/Databases/ModelNet40/' + name + '/train/'
tst_fold = os.path.expanduser('~') + '/Databases/ModelNet40/' + name + '/test/'
all_train_files = [os.path.join(fold, x) for x in os.listdir(fold) if x.endswith('.off')]
all_train_files.sort()
all_test_files = [os.path.join(tst_fold, x) for x in os.listdir(tst_fold) if x.endswith('.off')]
all_test_files.sort()
cur_files = {'train': all_train_files[:80],
'test': all_test_files[:20]}
for part in ['test', 'train']:
pin = os.path.expanduser('~') + '/Databases/ModelNet40/' + name + '/' + part + '/'
p_out = os.path.expanduser('~') + '/mesh_walker/datasets/modelnet40_80_20'
prepare_directory_from_scratch('modelnet',
pathname_expansion=pin + '*.off',
p_out=p_out, add_labels='modelnet', n_target_faces=n_target_faces,
fn_prefix=part + '_', verbose=False, filenames=cur_files[part])
def prepare_3dfuture():
n_target_faces = [1000, 2000, 4000, np.inf]
labels2use = future3d_labels
splits_path = os.path.expanduser('~') + '/Databases/3D-FUTURE/GT/model_infos.json'
with open(splits_path, 'r') as f:
json_files = json.load(f)
for i, file in tqdm(enumerate(json_files)):
# if i < 7183:
# continue
split = 'train' if file['is_train'] else 'test'
pin = os.path.expanduser('~') + '/Databases/3D-FUTURE/3D-FUTURE-model/' + file['model_id'] + '/raw_model.obj'
p_out = os.path.expanduser('~') + '/mesh_walker/datasets/3dFUTURE_raw/' + split + '_' + file['model_id']
if os.path.exists(p_out):
models = os.listdir(p_out)
n_models = len(models)
if n_models == 4:
continue
else:
n_max = [x.split('_')[-1].split('.')[0] for x in models if 'not_changed' in x]
if len(n_max):
n_max = int(n_max[0])
if n_max > 2000 and n_models == 3:
continue
elif n_max > 1000 and n_models == 2:
continue
elif n_max < 1000 and n_models == 1:
continue
# elif n_models == 3 and np.max([int(x.split('_')[-1].split('.')[0]) for x in models]) <= 4000:
# continue
# elif n_models == 2 and np.max([int(x.split('_')[-1].split('.')[0]) for x in models]) <= 2000:
# continue
# elif n_models == 1 and np.max([int(x.split('_')[-1].split('.')[0]) for x in models]) <= 1000:
# continue
prepare_directory_from_scratch('3dFUTURE', pathname_expansion=pin,
p_out=p_out, add_labels='3dfuture', n_target_faces=n_target_faces,
fn_prefix='', label=future3d_shape2label[file['category'].lower()], verbose=False)
def prepare_shapenetcore55():
import csv
n_target_faces = [1000, 2000, 4000]
base_path = '/media/ran/a6f25521-bcdb-4606-a6a0-5d8b26d7f1d8/home/ran/ShapeNetCore.v2/'
path = base_path + 'all.csv'
with open(path) as f:
filelist = [{k: v for k, v in row.items()}
for row in csv.DictReader(f, skipinitialspace=True)]
for i, file in tqdm(enumerate(filelist)):
split = file['split']
pin = os.path.join(base_path, file['synsetId'], file['modelId'], 'models', 'model_normalized.obj')
p_out = os.path.expanduser('~') + '/mesh_walker/datasets/shapenetcore55/' + '_'.join([split, file['synsetId'], file['modelId']])
prepare_directory_from_scratch('shapenetcore', pathname_expansion=pin,
p_out=p_out, add_labels='shapenetcore', n_target_faces=n_target_faces,
fn_prefix='', label=shapenet_shapeid2label[file['synsetId']], verbose=False)
def prepare_cubes(labels2use=cubes_labels,
path_in=os.path.expanduser('~') + '/datasets/cubes/',
p_out=os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/cubes_tmp'):
dataset_name = 'cubes'
if not os.path.isdir(p_out):
os.makedirs(p_out)
for i, name in enumerate(labels2use):
print('-->>>', name)
for part in ['test', 'train']:
pin = path_in + name + '/' + part + '/'
prepare_directory_from_scratch(dataset_name, pathname_expansion=pin + '*.obj',
p_out=p_out, add_labels=dataset_name, fn_prefix=part + '_', n_target_faces=[np.inf],
classification=False)
def prepare_shrec11_from_raw():
# Prepare labels per model name
current_label = None
model_number2label = [-1 for _ in range(600)]
for line in open(os.path.expanduser('~') + '/datasets/shrec11/evaluation/test.cla'):
sp_line = line.split(' ')
if len(sp_line) == 3:
name = sp_line[0].replace('_test', '')
if name in shrec11_labels:
current_label = name
else:
raise Exception('?')
if len(sp_line) == 1 and sp_line[0] != '\n':
model_number2label[int(sp_line[0])] = shrec11_shape2label[current_label]
# Prepare npz files
p_in = os.path.expanduser('~') + '/datasets/shrec11/raw/'
p_out = os.path.expanduser('~') + '/mesh_walker/datasets_processed-tmp/shrec11_raw_1.5k/'
prepare_directory_from_scratch('shrec11', pathname_expansion=p_in + '*.off',
p_out=p_out, add_labels=model_number2label, n_target_faces=[1500])
# Prepare split train / test
change_train_test_split(p_out, 16, 4, '16-04_C')
def calc_face_labels_after_remesh(mesh_orig, mesh, face_labels):
t_mesh = trimesh.Trimesh(vertices=np.array(mesh_orig.vertices), faces=np.array(mesh_orig.triangles), process=False)
remeshed_face_labels = []
for face in mesh.triangles:
vertices = np.array(mesh.vertices)[face]
center = np.mean(vertices, axis=0)
p, d, closest_face = trimesh.proximity.closest_point(t_mesh, [center])
remeshed_face_labels.append(face_labels[closest_face[0]])
return remeshed_face_labels
def prepare_human_body_segmentation():
dataset_name = 'sig17_seg_benchmark'
labels_fuzzy = True
human_seg_path = os.path.expanduser('~') + '/mesh_walker/datasets/sig17_seg_benchmark/'
p_out = os.path.expanduser('~') + '/mesh_walker/datasets/sig17_seg_benchmark-no_simplification/'
fileds_needed = ['vertices', 'faces', 'edge_features', 'edges_map', 'edges', 'kdtree_query',
'label', 'labels', 'dataset_name', 'face_labels']
if labels_fuzzy:
fileds_needed += ['labels_fuzzy']
n_target_faces = [np.inf]
if not os.path.isdir(p_out):
os.makedirs(p_out)
for part in ['test', 'train']:
print('part: ', part)
path_meshes = human_seg_path + '/meshes/' + part
seg_path = human_seg_path + '/segs/' + part
all_fns = []
for fn in Path(path_meshes).rglob('*.*'):
all_fns.append(fn)
for fn in tqdm(all_fns):
model_name = str(fn)
if model_name.endswith('.obj') or model_name.endswith('.off') or model_name.endswith('.ply'):
new_fn = model_name[model_name.find(part) + len(part) + 1:]
new_fn = new_fn.replace('/', '_')
new_fn = new_fn.split('.')[-2]
out_fn = p_out + '/' + part + '__' + new_fn
mesh = mesh_orig = load_mesh(model_name, classification=False)
mesh_data = EasyDict({'vertices': np.asarray(mesh.vertices), 'faces': np.asarray(mesh.triangles)})
face_labels = get_sig17_seg_bm_labels(mesh_data, model_name, seg_path)
labels_orig, v_labels_fuzzy = calc_vertex_labels_from_face_labels(mesh_data, face_labels)
if 0: # Show segment borders
b_vertices = np.where(np.sum(v_labels_fuzzy != 0, axis=1) > 1)[0]
vertex_colors = np.zeros((mesh_data['vertices'].shape[0],), dtype=int)
vertex_colors[b_vertices] = 1
utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], vertex_colors_idx=vertex_colors, point_size=2)
if 0: # Show face labels
utils.visualize_model(mesh_data['vertices'], mesh_data['faces'], face_colors=face_labels, show_vertices=False, show_edges=False)
if 0:
print(model_name)
print('min: ', np.min(mesh_data['vertices'], axis=0))
print('max: ', np.max(mesh_data['vertices'], axis=0))
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 23:47:36 2020
@author: Christian
"""
import hysteresis as hys
from hysteresis.protocol import getReturnCycle
import numpy as np
import matplotlib.pyplot as plt
def test_getReturnCycle():
""" Tests if the getReturnCycle function"""
t = np.linspace(0,1,101)
x1 = 1 - 1.5*t
y1 = (3)*x1**2 - 1
x2 = x1[-1] + t*2
y2 = y1[-1] + t*4
TestCycle1 = hys.SimpleCycle(np.column_stack([x1,y1]))
TestCycle2 = hys.SimpleCycle(np.column_stack([x2,y2]))
TestCycle3 = getReturnCycle(TestCycle1, TestCycle2)
xySolution = np.zeros([76, 2])
import numpy as np
import matplotlib.pyplot as plt
import os
import copy as cp
import random
random.seed(0)
import math
class TrafficInteraction:
# vm = 0;  % minimum velocity
# v0 = 10; % initial velocity
# vM = 13; % maximum velocity
# am = -3; % minimum acceleration
# aM = 3;  % maximum acceleration
def __init__(self, arrive_time, dis_ctl, args, deltaT=0.1, vm=5, vM=13, am=-3, aM=3, v0=10, diff_max=220,
lane_cw=2.5,
loc_con=True, show_col=False, virtual_l=True, lane_num=12):
# 坐标轴,车道0 从左到右, 车道1,从右到左 车道2,从下到上 车道3 从上到下
# dis_ctl
# -dis_ctl 0 dis_ctl
# -dis_ctl
self.virtual_l = virtual_l
self.virtual_data = {}
self.show_col = show_col
self.loc_con = loc_con
self.collision_thr = args.collision_thr
self.choose_veh = 15
self.safe_distance = 20
self.vm = vm
self.vM = vM
self.am = am
self.aM = aM
self.v0 = v0
self.lane_cw = lane_cw
self.lane_num = lane_num
self.intention_re = 0
self.thr = pow(self.vM - self.vm, 2) / 4 / self.aM + 2.2
self.choose_veh_info = [[] for i in range(self.lane_num)]
self.veh_info_record = [[] for i in range(self.lane_num)]
if self.lane_num == 3:
# T-junction
self.lane_info = [
[dis_ctl - 2 * lane_cw, 3.1415 / 2 * 3 * lane_cw, -(dis_ctl - 2 * lane_cw)], # left turn
[dis_ctl - 2 * lane_cw, 4 * lane_cw, -(dis_ctl - 2 * lane_cw)], # straight
[dis_ctl - 2 * lane_cw, 3.1415 / 2 * lane_cw, -(dis_ctl - 2 * lane_cw)] # right turn
]
self.lane2lane = [
[2, 4, 5],
[2],
[4, 0, 1],
[4],
[0, 2, 3],
[0]
]
self.intention = [
[1, 2],
[0, 1],
[0, 2]
]
elif self.lane_num == 4:
# single lane per approach
self.lane_info = [
[dis_ctl - 2 * lane_cw, 3.1415 / 2 * 3 * lane_cw, -(dis_ctl - 2 * lane_cw)], # left turn
[dis_ctl - 2 * lane_cw, 4 * lane_cw, -(dis_ctl - 2 * lane_cw)], # straight
[dis_ctl - 2 * lane_cw, 3.1415 / 2 * lane_cw, -(dis_ctl - 2 * lane_cw)] # right turn
]
# Order: where left-turn, straight and right-turn movements conflict at the same point, they are listed as left turn, straight, right turn
self.lane2lane = [
[10, 6, 9, 3, 7, 4, 8], # 0
[10, 6, 3, 4, 9, 5], # 1
[6, 10], # 2
[1, 9, 0, 6, 10, 7, 11], # 3
[1, 9, 6, 7, 0, 8], # 4
[9, 1], # 5
[4, 0, 3, 9, 1, 10, 2], # 6
[4, 0, 9, 10, 3, 11], # 7
[0, 4], # 8
[7, 3, 6, 0, 4, 1, 5], # 9
[7, 3, 0, 1, 6, 2], # 10
[3, 7] # 11
]
self.direction_num = 12
self.direction = [
[6, 7, 8],
[0, 1, 2],
[9, 10, 11],
[3, 4, 5]
]
self.alpha = math.atan((4 - math.sqrt(2)) / (4 + math.sqrt(2)))  # value of alpha in the geometry figure
self._alpha = math.atan((4 + math.sqrt(2)) / (4 - math.sqrt(2)))
self.beta = math.atan(2 / math.sqrt(5))  # value of beta in the figure
self._beta = math.atan(math.sqrt(5) / 2)
self.gama = math.atan(1 / 2 * math.sqrt(2))  # value of gamma in the figure
elif self.lane_num == 8:
# two lanes per approach
self.lane_info = [
[dis_ctl - 4 * lane_cw, 3.1415 / 2 * 5 * lane_cw, -(dis_ctl - 4 * lane_cw)],
[dis_ctl - 4 * lane_cw, 8 * lane_cw, -(dis_ctl - 4 * lane_cw)],
[dis_ctl - 4 * lane_cw, 3.1415 / 2 * lane_cw, -(dis_ctl - 4 * lane_cw)]
]
self.lane2lane = [
[14, 4, 13, 12, 9, 10, 5], # 0
[14, 13, 8, 4, 5, 6, 12], # 1
[14, 13, 8, 4, 5, 6, 7], # 2
[14], # 3
[2, 8, 1, 0, 13, 14, 9], # 4
[2, 1, 12, 8, 9, 10, 0], # 5
[2, 1, 12, 8, 9, 10, 11], # 6
[2], # 7
[6, 12, 5, 4, 1, 2, 13], # 8
[6, 5, 0, 12, 13, 14, 4], # 9
[6, 5, 0, 12, 13, 14, 15], # 10
[6], # 11
[10, 0, 9, 8, 5, 6, 1], # 12
[10, 9, 4, 0, 1, 2, 8], # 13
[10, 9, 4, 0, 1, 2, 3], # 14
[10] # 15
]
self.intention = [
[0, 1],
[1, 2],
[0, 1],
[1, 2],
[0, 1],
[1, 2],
[0, 1],
[1, 2]
]
self.direction_num = 16
self.direction = [
[0, 1, -1],
[-1, 2, 3],
[4, 5, -1],
[-1, 6, 7],
[8, 9, -1],
[-1, 10, 11],
[12, 13, -1],
[-1, 14, 15]
]
elif self.lane_num == 12:
# three lanes per approach
self.lane_info = [
[dis_ctl - 6 * lane_cw, 3.1415 / 2 * 7 * lane_cw, -(dis_ctl - 6 * lane_cw)],
[dis_ctl - 6 * lane_cw, 12 * lane_cw, -(dis_ctl - 6 * lane_cw)],
[dis_ctl - 6 * lane_cw, 3.1415 / 2 * lane_cw, -(dis_ctl - 6 * lane_cw)]
]
self.lane2lane = [
[10, 3, 9, 7],
[10, 6, 3, 4],
[],
[1, 6, 0, 10],
[1, 9, 6, 7],
[],
[4, 9, 3, 1],
[4, 0, 9, 10],
[],
[7, 0, 6, 4],
[7, 3, 0, 1],
[]
]
self.direction_num = 12
self.direction = [
[0, -1, -1],
[-1, 1, -1],
[-1, -1, 2],
[3, -1, -1],
[-1, 4, -1],
[-1, -1, 5],
[6, -1, -1],
[-1, 7, -1],
[-1, -1, 8],
[9, -1, -1],
[-1, 10, -1],
[-1, -1, 11]
]
self.cita = (2 * math.sqrt(10) - 6) * self.lane_cw  # distance of the curve intersection from the x- or y-axis
self.alpha = math.atan((6 * self.lane_cw + self.cita) / (3 * self.lane_cw))  # angle of the intersection within the arc (the larger one)
self.beta = math.pi / 2 - self.alpha  # angle of the intersection within the arc (the smaller one)
self.gama = math.atan((math.sqrt(13) * self.lane_cw) / (6 * self.lane_cw))  # angle of the two-arc intersection within the quadrant (the smaller one)
self._gama = math.pi / 2 - self.gama
self.closer_veh_num = args.o_agent_num
self.c_mode = args.c_mode
self.merge_p = [
[0, 0, self.lane_cw, -self.lane_cw],
[0, 0, -self.lane_cw, self.lane_cw],
[-self.lane_cw, self.lane_cw, 0, 0],
[self.lane_cw, -self.lane_cw, 0, 0]
]
self.arrive_time = arrive_time
self.current_time = 0
self.passed_veh = 0
self.passed_veh_step_total = 0
self.virtual_lane = []
self.virtual_lane_4 = [[] for i in range(self.direction_num)]
self.virtual_lane_real_p = [[] for i in range(self.direction_num)]
self.closer_cars = []
self.closer_same_l_car = [-1, -1]
self.deltaT = deltaT
self.dis_control = dis_ctl
self.veh_num = [0 for i in range(self.lane_num)]  # number of vehicles currently in each lane
self.veh_rec = [0 for i in range(self.lane_num)]  # cumulative number of vehicles per lane
self.input = [0 for i in range(4)]  # cumulative number of vehicles per entrance
self.veh_info = [[] for i in range(self.lane_num)]
self.diff_max = diff_max
self.collision = False
self.id_seq = 0
self.delete_veh = []
init = True
while init:
for i in range(self.lane_num):
if self.veh_num[i] > 0:
init = False
if init:
self.scene_update()
def scene_update(self):
self.current_time += self.deltaT
collisions = 0
estm_collisions = 0
re_state = []
reward = []
collisions_per_veh = []
actions = []
ids = []
jerks = []
self.delete_veh.clear()
for i in range(self.lane_num):
if len(self.veh_info[i]) > 0:
for index, direction in enumerate(self.direction[i]):
if direction == -1:
continue
self.virtual_lane_4[direction].clear()
self.virtual_lane_real_p[direction].clear()
for _itr in self.virtual_lane:
# 目标车道
if _itr[1] == i:
self.virtual_lane_real_p[direction].append([_itr[0], _itr[1], _itr[2],
self.veh_info[_itr[1]][_itr[2]]["v"],
direction])
if self.direction[_itr[1]][_itr[3]] == direction:
# same lane: append directly as (p, i, j, v)
self.virtual_lane_4[direction].append([_itr[0], _itr[1], _itr[2],
self.veh_info[_itr[1]][_itr[2]]["v"], direction])
else:
if self.veh_info[_itr[1]][_itr[2]]["p"] - \
self.lane_info[self.veh_info[_itr[1]][_itr[2]]['intention']][1] > 0:
virtual_dis = self.veh_info[_itr[1]][_itr[2]]["p"] - \
self.lane_info[self.veh_info[_itr[1]][_itr[2]]['intention']][1] + \
self.lane_info[index][1]
self.virtual_lane_4[direction].append(
[virtual_dis, _itr[1], _itr[2], self.veh_info[_itr[1]][_itr[2]]["v"],
direction])
elif self.direction[_itr[1]][_itr[3]] in self.lane2lane[direction]:
# lanes that conflict with this one
virtual_d, choose = self.get_virtual_distance(self.direction[_itr[1]][_itr[3]], direction,
_itr[0])
if choose:
self.virtual_lane_real_p[direction].append([_itr[0], _itr[1], _itr[2],
self.veh_info[_itr[1]][_itr[2]]["v"],
direction])
for virtual_temp in range(len(virtual_d)):
self.virtual_lane_4[direction].append([virtual_d[virtual_temp], _itr[1], _itr[2],
self.veh_info[_itr[1]][_itr[2]]["v"],
self.direction[_itr[1]][_itr[3]]])
self.virtual_lane_4[direction] = sorted(self.virtual_lane_4[direction], key=lambda item: item[0])
self.virtual_lane_real_p[direction] = sorted(self.virtual_lane_real_p[direction],
key=lambda item: item[0])
for j, item in enumerate(self.veh_info[i]):
if self.veh_info[i][j]["intention"] == index:
if self.veh_info[i][j]["seq_in_lane"] == self.choose_veh:
self.choose_veh_info[i].append(
[self.current_time, self.veh_info[i][j]["p"], self.veh_info[i][j]["v"],
self.veh_info[i][j]["action"]])
t_distance = 2
d_distance = 10
if self.veh_info[i][j]["control"]:
self.veh_info_record[i][item["seq_in_lane"]].append(
[self.current_time, item["p"], item["v"], item["a"]]
)
sta, virtual_lane = self.get_state(i, j, self.virtual_lane_4[direction], direction)
self.virtual_lane_4[direction] = virtual_lane
self.veh_info[i][j]["state"] = cp.deepcopy(sta)
re_state.append(np.array(sta))
actions.append([state[2] for state in sta])
ids.append([i, j])
self.veh_info[i][j]["count"] += 1
closer_car = self.closer_cars[0]
if closer_car[0] >= 0:
id_seq_temp = [temp_item[1:3] for temp_item in self.virtual_lane_4[direction]]
if [closer_car[0], closer_car[1]] not in id_seq_temp:
index_closer = -1
else:
index_closer = id_seq_temp.index([closer_car[0], closer_car[1]])
d_distance = abs(
self.veh_info[i][j]["p"] - self.virtual_lane_4[direction][index_closer][0])
self.veh_info[i][j]["closer_p"] = self.virtual_lane_4[direction][index_closer][0]
if d_distance != 0:
t_distance = (self.veh_info[i][j]["p"] -
self.virtual_lane_4[direction][index_closer][0]) / \
(self.veh_info[i][j]["v"] -
self.veh_info[closer_car[0]][closer_car[1]]["v"] +
0.0001)
else:
self.veh_info[i][j]["closer_p"] = 150
vw = 2.0
cw = 3.0
r_ = 0
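# Reward shaping (the numbers below are an illustrative worked example, not measured values):
# a small positive time-to-collision is penalised via 1/tanh(-t/4), squared jerk is penalised,
# a small gap is penalised via log((d/10)^5 + 1e-5), and speed above vm is rewarded.
# E.g. t_distance = 2 s, d_distance = 5 m, v = 10 m/s, jerk = 0 with the defaults vm = 5, aM = 3, am = -3
# gives roughly 1/tanh(-0.5) + log(0.5**5) + (10 - 5) / 6 * 2 ≈ -2.2 - 3.5 + 1.7 ≈ -4.0.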
if 0 < t_distance < 4:
r_ += 1 / np.tanh(-t_distance / 4.0)
r_ -= pow(self.veh_info[i][j]["jerk"] / self.deltaT, 2) / 3600.0 * cw
if d_distance < 10:
r_ += np.log(pow(d_distance / 10, 5) + 0.00001)
r_ += (self.veh_info[i][j]["v"] - self.vm) / float(self.aM - self.am) * vw
reward.append(min(20, max(-20, r_)))
self.veh_info[i][j]["jerk_sum"] += abs(self.veh_info[i][j]["jerk"] / self.deltaT)
if 0 <= closer_car[0]:
veh_choose = self.veh_info[i][j]
veh_closer = self.veh_info[closer_car[0]][closer_car[1]]
p_choose = self.get_p(veh_choose["p"], i, self.veh_info[i][j]["intention"])
p_closer = self.get_p(veh_closer["p"], closer_car[0],
self.veh_info[closer_car[0]][closer_car[1]]["intention"])
d_distance = np.sqrt(
np.power((p_closer[0] - p_choose[0]), 2) + np.power((p_closer[1] - p_choose[1]),
2)
)
if abs(d_distance) < self.collision_thr:
self.veh_info[i][j]["collision"] += 1 # 发生碰撞
self.veh_info[closer_car[0]][closer_car[1]]["collision"] += 1 # 发生碰撞
if self.veh_info[i][j]["finish"]:
self.veh_info[i][j]["control"] = False
collisions += self.veh_info[i][j]["collision"]
estm_collisions += self.veh_info[i][j]["estm_collision"]
collisions_per_veh.append(
[self.veh_info[i][j]["collision"], self.veh_info[i][j]["estm_collision"]])
if self.veh_info[i][j]["p"] < -self.dis_control + int(
(self.lane_num + 1) / 2) * self.lane_cw or self.veh_info[i][j][
"collision"] > 0:
# vehicle has driven out of the intersection; delete it
if self.veh_info[i][j]["collision"] > 0:
reward[-1] = -10
self.veh_info[i][j]["Done"] = True
self.delete_veh.append([i, j])
self.veh_info[i][j]["vir_header"] = [-1, -1]
elif self.veh_info[i][j]["p"] < 0 and self.veh_info[i][j]["control"]:
self.veh_info[i][j]["Done"] = True
self.veh_info[i][j]["finish"] = True
self.veh_info[i][j]["control"] = False
self.veh_info[i][j]["vir_header"] = [-1, -1]
self.veh_info[i][j]["lock"] = False
self.passed_veh += 1
reward[-1] = 5
jerks.append(self.veh_info[i][j]["jerk_sum"])
self.passed_veh_step_total += self.veh_info[i][j]["step"]
# add a newly arriving vehicle
self.add_new_veh(i)
# if self.show_col:
# print("add new car:", i, self.veh_num[i] - 1)
self.virtual_lane.clear()
lock = 0
for i in range(self.lane_num):
for j, veh in enumerate(self.veh_info[i]):
if veh["control"] and not self.veh_info[i][j]["lock"]:
if self.check_lock(i, j):
lock += 1
for v in self.virtual_lane_4[0]:
v_name = "%s_%s" % (v[1], self.veh_info[v[1]][v[2]]["seq_in_lane"])
if v_name not in self.virtual_data:
self.virtual_data[v_name] = []
self.virtual_data[v_name].append([self.current_time, v[0], v[3]])
return ids, re_state, reward, actions, collisions, estm_collisions, collisions_per_veh, jerks, lock
def add_new_veh(self, i):
if self.current_time >= self.arrive_time[self.veh_rec[i]][i]:
state_total = np.zeros((self.closer_veh_num + 1, (self.closer_veh_num + 1) * 4))
intention = 1  # default
random.seed()
if self.lane_num == 3:
intention = self.intention[i][random.randint(0, 1)]
elif self.lane_num == 4:
# intention = random.randint(0, 2)
intention = self.intention_re % 3
self.intention_re += 1
elif self.lane_num == 8:
intention = self.intention[i][random.randint(0, 1)]
# intention = self.intention[i][self.intention_re % 2]
self.intention_re += 1
elif self.lane_num == 12:
intention = i % 3
p = sum(self.lane_info[intention][0:2])
self.veh_info[i].append(
{
"intention": intention, # 随机生成意向0~2分别表示左转,直行和右转
"buffer": [],
"route": self.direction[i][intention],
"count": 0,
"Done": False,
"p": p,
"jerk": 0,
"jerk_sum": 0,
"lock_a": 0,
"lock": False,
"vir_header": [-1, -1],
"vir_dis": 100,
"v": self.v0,
"a": 0,
"action": 0,
"closer_p": 150,
"lane": i,
"header": False,
"reward": 10,
"dis_front": 50,
"seq_in_lane": self.veh_rec[i],
"control": True,
"state": state_total,
"step": 0,
"collision": 0,
"finish": False,
"estm_collision": 0,
"estm_arrive_time": abs(p / self.v0),
"id_info": [self.id_seq, self.veh_num[i]]
})
# "id_info":[在所有车中的出现次序,在当前车道中的出现次序]
self.veh_num[i] += 1
self.veh_rec[i] += 1
self.input[i % 4] += 1
self.veh_info_record[i].append([])
self.id_seq += 1
def delete_vehicle(self):
# Remove old vehicles
self.delete_veh = sorted(self.delete_veh, key=lambda item: -item[1])
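# Pop the highest indices first so the remaining per-lane indices stay valid during deletion.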
for d_i in self.delete_veh:
if len(self.veh_info[d_i[0]]) > d_i[1]:
self.veh_info[d_i[0]].pop(d_i[1])
if self.veh_num[d_i[0]] > 0:
self.veh_num[d_i[0]] -= 1
else:
print("except!!!")
# Returns the distance between the two vehicles measured with the collision point as the origin;
# lane1 and p1 belong to the vehicle being iterated over, lane2 is the lane held fixed for each pass
def get_virtual_distance(self, lane1, lane2, p1):
virtual_d = []
thr = 0
# if self.lane_num==4:
# thr = -5
choose = False
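# Distances along turning paths are arc lengths (radius * swept angle); for example
# 1.5 * 3.1415 * self.lane_cw * (self.beta / (0.5 * 3.1415)) reduces to 3 * self.lane_cw * self.beta,
# i.e. an arc of radius 3 * lane_cw swept by the angle beta.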
if self.lane_num == 4:
if lane2 in [0, 3, 6, 9]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1 - (4 * self.lane_cw - 3 * self.lane_cw * math.cos(self.gama))
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + (3 * self.lane_cw * (0.5 * 3.1415 - self.gama)))
choose = True
# if lane1 == self.lane2lane[lane2][1]:
# delta_d1 = p1 - (1.5 * 3.1415) * self.lane_cw * (self.alpha / (0.5 * 3.1415))
# if delta_d1 > thr:
# virtual_d.append(abs(delta_d1) + (1.5 * 3.1415 * self.lane_cw * (self._alpha / (0.5 * 3.1415))))
# choose = True
if lane1 == self.lane2lane[lane2][2]:
delta_d1 = p1 - 1.5 * 3.1415 * self.lane_cw * self.beta / (0.5 * 3.1415)
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + (1.5 * 3.1415 * self.lane_cw * self._beta / (0.5 * 3.1415)))
choose = True
if lane1 == self.lane2lane[lane2][3]:
delta_d1 = p1 - 1.5 * 3.1415 * self.lane_cw * self._beta / (0.5 * 3.1415)
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 1.5 * 3.1415 * self.lane_cw * self.beta / (0.5 * 3.1415))
choose = True
if lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1 - (1.5 * 3.1415) * self.lane_cw * (self._alpha / (0.5 * 3.1415))
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + (1.5 * 3.1415) * self.lane_cw * (self.alpha / (0.5 * 3.1415)))
choose = True
if lane1 == self.lane2lane[lane2][4]:
delta_d1 = p1 - 3 * self.lane_cw * math.cos(self.gama)
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + (1.5 * 3.1415 * self.lane_cw * (self.gama / (0.5 * 3.1415))))
choose = True
if lane1 == self.lane2lane[lane2][5]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
if lane1 == self.lane2lane[lane2][6]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
elif lane2 in [1, 4, 7, 10]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1 - self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 3 * self.lane_cw)
choose = True
elif lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1 - 1.5 * 3.1415 * self.lane_cw * self.gama / (0.5 * 3.1415)
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 3 * self.lane_cw * math.cos(self.gama))
choose = True
elif lane1 == self.lane2lane[lane2][2]:
delta_d1 = p1 - 1.5 * 3.1415 * self.lane_cw * (0.5 * 3.1415 - self.gama) / (0.5 * 3.1415)
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + (4 * self.lane_cw - 3 * self.lane_cw * math.cos(self.gama)))
choose = True
elif lane1 == self.lane2lane[lane2][3]:
delta_d1 = p1 - 3 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + self.lane_cw)
choose = True
elif lane1 == self.lane2lane[lane2][4]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
elif lane1 == self.lane2lane[lane2][5]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
elif lane2 in [2, 5, 8, 11]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
if lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
elif self.lane_num == 8:
# Left-turn lanes
# [14, 4, 13, 12, 9, 10, 5]
if lane2 in [0, 4, 8, 12]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1 - (8 * self.lane_cw - math.sqrt(24) * self.lane_cw)
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.atan(math.sqrt(24)) * 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1 - math.atan(3 / 4) * 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.atan(4 / 3) * 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][2]:
delta_d1 = p1 - 4 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.atan(4 / 3) * 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][3]: # lane2==0 lane1==12
# ! delta_d1 = p1 - 5 * self.lane_cw
delta_d1 = p1 - math.atan(4 / 3) * 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.atan(3 / 4) * 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][4]:
delta_d1 = p1 - 4 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.atan(3 / 4) * 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][5]:
delta_d1 = p1 - math.sqrt(24) * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.atan(1 / math.sqrt(24)) * 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][6]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
# Through lane on the left side
# [14, 13, 8, 4, 5, 6, 12], # 1
elif lane2 in [1, 5, 9, 13]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1 - 3 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + 7 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1 - 3 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][2]:
delta_d1 = p1 - math.atan(3 / 4) * 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 4 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][3]:
delta_d1 = p1 - math.atan(4 / 3) * 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 4 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][4]:
delta_d1 = p1 - 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + 3 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][5]:
delta_d1 = p1 - 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][6]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
# Through lane on the right side
elif lane2 in [2, 6, 10, 14]:
# [14, 13, 8, 4, 5, 6, 7], # 2
# if lane2 in [0, 4, 8, 12]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1 - self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + 7 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1 - self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + 5 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][2]:
delta_d1 = p1 - math.atan(1 / math.sqrt(24)) * 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + math.sqrt(24) * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][3]:
delta_d1 = p1 - math.atan(math.sqrt(24)) * 5 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(abs(delta_d1) + 8 * self.lane_cw - math.sqrt(24) * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][4]:
delta_d1 = p1 - 7 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + 3 * self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][5]:
delta_d1 = p1 - 7 * self.lane_cw
if delta_d1 > thr:
virtual_d.append(delta_d1 + self.lane_cw)
choose = True
if lane1 == self.lane2lane[lane2][6]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
# Right-turn lanes
elif lane2 in [3, 7, 11, 15]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1
if delta_d1 > thr:
virtual_d.append(p1)
choose = True
elif self.lane_num == 12:
# if lane2 in [1, 4, 7, 10]:
# if lane1 == self.lane2lane[lane2][0]:
# # distance to the collision point
# delta_d1 = p1 - 3 * self.lane_cw
# # delta_d2 = p2 - 9 * self.lane_cw
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(9 * self.lane_cw + delta_d1)
# choose = True
# elif lane1 == self.lane2lane[lane2][1]:
# # arc length of the segment with angle alpha
# beta_d = self.beta * 7 * self.lane_cw
# delta_d1 = p1 - beta_d
# # delta_d2 = p2 - 6 * self.lane_cw - self.cita
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(6 * self.lane_cw - self.cita + delta_d1)
# choose = True
# elif lane1 == self.lane2lane[lane2][2]:
# # arc length of the segment with angle beta
# alpha_d = self.alpha * 7 * self.lane_cw
# delta_d1 = p1 - alpha_d
# # delta_d2 = p2 - 6 * self.lane_cw + self.cita
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(6 * self.lane_cw - self.cita + delta_d1)
# choose = True
# elif lane1 == self.lane2lane[lane2][3]:
# delta_d1 = p1 - 9 * self.lane_cw
# # delta_d2 = p2 - 3 * self.lane_cw
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(3 * self.lane_cw + delta_d1)
# choose = True
# else:
# if p1 > thr:
# virtual_d.append(p1)
# choose = True
# elif lane2 in [0, 3, 6, 9]:
# if lane1 == self.lane2lane[lane2][0]:
# delta_d1 = p1 - 6 * self.lane_cw + self.cita
# # delta_d2 = p2 - self.alpha * 7 * self.lane_cw
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(self.alpha * 7 * self.lane_cw + delta_d1)
# choose = True
# elif lane1 == self.lane2lane[lane2][1]:
# delta_d1 = p1 - self.gama * 7 * self.lane_cw
# # delta_d2 = p2 - self._gama * 7 * self.lane_cw
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(self._gama * 7 * self.lane_cw + delta_d1)
# choose = True
# elif lane1 == self.lane2lane[lane2][2]:
# delta_d1 = p1 - self._gama * 7 * self.lane_cw
# # delta_d2 = p2 - self.gama * 7 * self.lane_cw
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(self.gama * 7 * self.lane_cw + delta_d1)
# choose = True
# else:
# delta_d1 = p1 - 6 * self.lane_cw - self.cita
# # delta_d2 = p2 - self.beta * 7 * self.lane_cw
# # delta_d = delta_d1 - delta_d2
# if delta_d1 > thr:
# virtual_d.append(self.beta * 7 * self.lane_cw + delta_d1)
# choose = True
# else:
# if p1 > thr:
# virtual_d.append(p1)
# choose = True
if lane2 in [1, 4, 7, 10]:
if lane1 == self.lane2lane[lane2][0]:
# distance to the collision point
delta_d1 = p1 - 3 * self.lane_cw
# delta_d2 = p2 - 9 * self.lane_cw
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(9 * self.lane_cw + delta_d1)
choose = True
elif lane1 == self.lane2lane[lane2][1]:
# arc length of the segment with angle alpha
beta_d = self.beta * 7 * self.lane_cw
delta_d1 = p1 - beta_d
# delta_d2 = p2 - 6 * self.lane_cw - self.cita
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(6 * self.lane_cw + self.cita + delta_d1)
choose = True
elif lane1 == self.lane2lane[lane2][2]:
# arc length of the segment with angle beta
alpha_d = self.alpha * 7 * self.lane_cw
delta_d1 = p1 - alpha_d
# delta_d2 = p2 - 6 * self.lane_cw + self.cita
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(6 * self.lane_cw - self.cita + delta_d1)
choose = True
elif lane1 == self.lane2lane[lane2][3]:
delta_d1 = p1 - 9 * self.lane_cw
# delta_d2 = p2 - 3 * self.lane_cw
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(3 * self.lane_cw + delta_d1)
choose = True
else:
if p1 > 0:
virtual_d.append(p1)
choose = True
elif lane2 in [0, 3, 6, 9]:
if lane1 == self.lane2lane[lane2][0]:
delta_d1 = p1 - 6 * self.lane_cw + self.cita
# delta_d2 = p2 - self.alpha * 7 * self.lane_cw
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(self.alpha * 7 * self.lane_cw + delta_d1)
choose = True
elif lane1 == self.lane2lane[lane2][1]:
delta_d1 = p1 - self.gama * 7 * self.lane_cw
# delta_d2 = p2 - self._gama * 7 * self.lane_cw
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(self._gama * 7 * self.lane_cw + delta_d1)
choose = True
elif lane1 == self.lane2lane[lane2][2]:
delta_d1 = p1 - self._gama * 7 * self.lane_cw
# delta_d2 = p2 - self.gama * 7 * self.lane_cw
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(self.gama * 7 * self.lane_cw + delta_d1)
choose = True
else:
delta_d1 = p1 - 6 * self.lane_cw - self.cita
# delta_d2 = p2 - self.beta * 7 * self.lane_cw
# delta_d = delta_d1 - delta_d2
if delta_d1 > thr:
virtual_d.append(self.beta * 7 * self.lane_cw + delta_d1)
choose = True
else:
if p1 > 0:
virtual_d.append(p1)
choose = True
return virtual_d, choose
# Compute the real (x, y, yaw) position from the position along the lane
def get_p(self, p, i, intention):
# x, y, yaw (angle with the positive x axis)
new_p = [0, 0, 0]
# car_info = self.veh_info[i][j]
intention_info = intention
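# Conventions: p is the remaining distance along the vehicle's path; straight segments map
# linearly to x/y, while turning segments are parameterised by the swept angle
# beta_temp = p / radius on a circular arc. The yaw is measured from the positive x axis,
# and 3.1415 is used as an approximation of pi throughout this function.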
if self.lane_num == 3:
if i == 0:
# Go straight
if intention_info == 1:
new_p[0] = -1 * p + 2 * self.lane_cw
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
# Turn right
else:
if p > self.lane_info[2][1]:
new_p[0] = -1 * (p - self.lane_info[2][1] + 2 * self.lane_cw)
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
elif p > 0:
beta_temp = p / self.lane_cw
delta_y = math.sin(beta_temp) * self.lane_cw
delta_x = math.cos(beta_temp) * self.lane_cw
new_p[0] = -1 * (2 * self.lane_cw - delta_x)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 1.5 * 3.1415
else:
new_p[0] = -1 * self.lane_cw
new_p[1] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif i == 1:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = 1 * (p - self.lane_info[0][1] + 2 * self.lane_cw)
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
elif p > 0:
beta_temp = p / (3 * self.lane_cw) # rad
delta_y = math.sin(beta_temp) * 3 * self.lane_cw
delta_x = math.cos(beta_temp) * 3 * self.lane_cw
new_p[0] = -1 * (delta_x - 2 * self.lane_cw)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 3.1415
else:
new_p[0] = -1 * self.lane_cw
new_p[1] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
# Go straight
else:
new_p[0] = p - 2 * self.lane_cw
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
else:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * (p - self.lane_info[0][1] + 2 * self.lane_cw)
new_p[2] = 3.1415 / 2
elif p > 0:
beta_temp = p / (3 * self.lane_cw) # rad
delta_x = math.sin(beta_temp) * 3 * self.lane_cw
delta_y = math.cos(beta_temp) * 3 * self.lane_cw
new_p[0] = 1 * (delta_x - 2 * self.lane_cw)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 3.1415 / 2
else:
new_p[0] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[1] = self.lane_cw
new_p[2] = 3.1415
# Turn right
else:
if p > self.lane_info[2][1]:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * (p - self.lane_info[2][1] + 2 * self.lane_cw)
new_p[2] = 3.1415 / 2
elif p > 0:
beta_temp = p / self.lane_cw
delta_x = math.sin(beta_temp) * self.lane_cw
delta_y = math.cos(beta_temp) * self.lane_cw
new_p[0] = 1 * (2 * self.lane_cw - delta_x)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = beta_temp
else:
new_p[0] = -1 * p + 2 * self.lane_cw
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
elif self.lane_num == 4:
if i == 0:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = -1 * (p - self.lane_info[0][1] + 2 * self.lane_cw)
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
elif p > 0:
beta_temp = p / (3 * self.lane_cw) # rad
delta_y = math.sin(beta_temp) * 3 * self.lane_cw
delta_x = math.cos(beta_temp) * 3 * self.lane_cw
new_p[0] = 1 * (delta_x - 2 * self.lane_cw)
new_p[1] = 1 * (2 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp
else:
new_p[0] = self.lane_cw
new_p[1] = -1 * p + 2 * self.lane_cw
new_p[2] = 3.1415 / 2
# Go straight
elif intention_info == 1:
new_p[0] = -1 * p + 2 * self.lane_cw
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
# Turn right
else:
if p > self.lane_info[2][1]:
new_p[0] = -1 * (p - self.lane_info[2][1] + 2 * self.lane_cw)
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
elif p > 0:
beta_temp = p / self.lane_cw
delta_y = math.sin(beta_temp) * self.lane_cw
delta_x = math.cos(beta_temp) * self.lane_cw
new_p[0] = -1 * (2 * self.lane_cw - delta_x)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 1.5 * 3.1415
else:
new_p[0] = -1 * self.lane_cw
new_p[1] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif i == 1:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = 1 * (p - self.lane_info[0][1] + 2 * self.lane_cw)
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
elif p > 0:
beta_temp = p / (3 * self.lane_cw) # rad
delta_y = math.sin(beta_temp) * 3 * self.lane_cw
delta_x = math.cos(beta_temp) * 3 * self.lane_cw
new_p[0] = -1 * (delta_x - 2 * self.lane_cw)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 3.1415
else:
new_p[0] = -1 * self.lane_cw
new_p[1] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
# Go straight
elif intention_info == 1:
new_p[0] = p - 2 * self.lane_cw
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
# Turn right
else:
if p > self.lane_info[2][1]:
new_p[0] = 1 * (p - self.lane_info[2][1] + 2 * self.lane_cw)
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
elif p > 0:
beta_temp = p / self.lane_cw
delta_y = math.sin(beta_temp) * self.lane_cw
delta_x = math.cos(beta_temp) * self.lane_cw
new_p[0] = 1 * (2 * self.lane_cw - delta_x)
new_p[1] = 1 * (2 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 3.1415 / 2
else:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * p + 2 * self.lane_cw
new_p[2] = 3.1415 / 2
elif i == 2:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * (p - self.lane_info[0][1] + 2 * self.lane_cw)
new_p[2] = 3.1415 / 2
elif p > 0:
beta_temp = p / (3 * self.lane_cw) # rad
delta_x = math.sin(beta_temp) * 3 * self.lane_cw
delta_y = math.cos(beta_temp) * 3 * self.lane_cw
new_p[0] = 1 * (delta_x - 2 * self.lane_cw)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 3.1415 / 2
else:
new_p[0] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[1] = self.lane_cw
new_p[2] = 3.1415
# Go straight
elif intention_info == 1:
new_p[0] = self.lane_cw
new_p[1] = -1 * p + 2 * self.lane_cw
new_p[2] = 3.1415 / 2
# Turn right
else:
if p > self.lane_info[2][1]:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * (p - self.lane_info[2][1] + 2 * self.lane_cw)
new_p[2] = 3.1415 / 2
elif p > 0:
beta_temp = p / self.lane_cw
delta_x = math.sin(beta_temp) * self.lane_cw
delta_y = math.cos(beta_temp) * self.lane_cw
new_p[0] = 1 * (2 * self.lane_cw - delta_x)
new_p[1] = -1 * (2 * self.lane_cw - delta_y)
new_p[2] = beta_temp
else:
new_p[0] = -1 * p + 2 * self.lane_cw
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
else:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = -1 * self.lane_cw
new_p[1] = 1 * (p - self.lane_info[0][1] + 2 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif p > 0:
beta_temp = p / (3 * self.lane_cw) # rad
delta_x = math.sin(beta_temp) * 3 * self.lane_cw
delta_y = math.cos(beta_temp) * 3 * self.lane_cw
new_p[0] = -1 * (delta_x - 2 * self.lane_cw)
new_p[1] = 1 * (2 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 1.5 * 3.1415
else:
new_p[0] = 1 * (-1 * p + 2 * self.lane_cw)
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
# Go straight
elif intention_info == 1:
new_p[0] = -1 * self.lane_cw
new_p[1] = p - 2 * self.lane_cw
new_p[2] = 1.5 * 3.1415
# Turn right
else:
if p > self.lane_info[2][1]:
new_p[0] = -1 * self.lane_cw
new_p[1] = 1 * (p - self.lane_info[2][1] + 2 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif p > 0:
beta_temp = p / self.lane_cw
delta_x = math.sin(beta_temp) * self.lane_cw
delta_y = math.cos(beta_temp) * self.lane_cw
new_p[0] = -1 * (2 * self.lane_cw - delta_x)
new_p[1] = 1 * (2 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 3.1415
else:
new_p[0] = -1 * (-1 * p + 2 * self.lane_cw)
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
elif self.lane_num == 8:
if i == 0:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = 1 * (p - self.lane_info[0][1] + 4 * self.lane_cw)
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
elif p > 0:
beta_temp = p / (5 * self.lane_cw) # rad
delta_y = math.sin(beta_temp) * 5 * self.lane_cw
delta_x = math.cos(beta_temp) * 5 * self.lane_cw
new_p[0] = -1 * (delta_x - 4 * self.lane_cw)
new_p[1] = -1 * (4 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 3.1415
else:
new_p[0] = -1 * self.lane_cw
new_p[1] = -1 * (-1 * p + 4 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
# Go straight
elif intention_info == 1:
new_p[0] = p - 4 * self.lane_cw
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
elif i == 1:
# Go straight
if intention_info == 1:
new_p[0] = p - 4 * self.lane_cw
new_p[1] = 3 * self.lane_cw
new_p[2] = 3.1415
# Turn right
elif intention_info == 2:
if p > self.lane_info[2][1]:
new_p[0] = 1 * (p - self.lane_info[2][1] + 4 * self.lane_cw)
new_p[1] = 3 * self.lane_cw
new_p[2] = 3.1415
elif p > 0:
beta_temp = p / self.lane_cw
delta_y = math.sin(beta_temp) * self.lane_cw
delta_x = math.cos(beta_temp) * self.lane_cw
new_p[0] = 1 * (4 * self.lane_cw - delta_x)
new_p[1] = 1 * (4 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 3.1415 / 2
else:
new_p[0] = 3 * self.lane_cw
new_p[1] = -1 * p + 4 * self.lane_cw
new_p[2] = 3.1415 / 2
elif i == 2:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = -1 * self.lane_cw
new_p[1] = 1 * (p - self.lane_info[0][1] + 4 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif p > 0:
beta_temp = p / (5 * self.lane_cw) # rad
delta_x = math.sin(beta_temp) * 5 * self.lane_cw
delta_y = math.cos(beta_temp) * 5 * self.lane_cw
new_p[0] = -1 * (delta_x - 4 * self.lane_cw)
new_p[1] = 1 * (4 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 1.5 * 3.1415
else:
new_p[0] = 1 * (-1 * p + 4 * self.lane_cw)
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
# Go straight
elif intention_info == 1:
new_p[0] = -1 * self.lane_cw
new_p[1] = p - 4 * self.lane_cw
new_p[2] = 1.5 * 3.1415
elif i == 3:
# Go straight
if intention_info == 1:
new_p[0] = -3 * self.lane_cw
new_p[1] = p - 4 * self.lane_cw
new_p[2] = 1.5 * 3.1415
# Turn right
elif intention_info == 2:
if p > self.lane_info[2][1]:
new_p[0] = -3 * self.lane_cw
new_p[1] = 1 * (p - self.lane_info[2][1] + 4 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif p > 0:
beta_temp = p / self.lane_cw
delta_x = math.sin(beta_temp) * self.lane_cw
delta_y = math.cos(beta_temp) * self.lane_cw
new_p[0] = -1 * (4 * self.lane_cw - delta_x)
new_p[1] = 1 * (4 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 3.1415
else:
new_p[0] = -1 * (-1 * p + 4 * self.lane_cw)
new_p[1] = 3 * self.lane_cw
new_p[2] = 3.1415
elif i == 4:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = -1 * (p - self.lane_info[0][1] + 4 * self.lane_cw)
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
elif p > 0:
beta_temp = p / (5 * self.lane_cw) # rad
delta_y = math.sin(beta_temp) * 5 * self.lane_cw
delta_x = math.cos(beta_temp) * 5 * self.lane_cw
new_p[0] = 1 * (delta_x - 4 * self.lane_cw)
new_p[1] = 1 * (4 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp
else:
new_p[0] = self.lane_cw
new_p[1] = -1 * p + 4 * self.lane_cw
new_p[2] = 3.1415 / 2
# Go straight
elif intention_info == 1:
new_p[0] = -1 * p + 4 * self.lane_cw
new_p[1] = -1 * self.lane_cw
new_p[2] = 0
elif i == 5:
# Go straight
if intention_info == 1:
new_p[0] = -1 * p + 4 * self.lane_cw
new_p[1] = -3 * self.lane_cw
new_p[2] = 0
# Turn right
elif intention_info == 2:
if p > self.lane_info[2][1]:
new_p[0] = -1 * (p - self.lane_info[2][1] + 4 * self.lane_cw)
new_p[1] = -3 * self.lane_cw
new_p[2] = 0
elif p > 0:
beta_temp = p / self.lane_cw
delta_y = math.sin(beta_temp) * self.lane_cw
delta_x = math.cos(beta_temp) * self.lane_cw
new_p[0] = -1 * (4 * self.lane_cw - delta_x)
new_p[1] = -1 * (4 * self.lane_cw - delta_y)
new_p[2] = beta_temp + 1.5 * 3.1415
else:
new_p[0] = -3 * self.lane_cw
new_p[1] = -1 * (-1 * p + 4 * self.lane_cw)
new_p[2] = 1.5 * 3.1415
elif i == 6:
# Turn left
if intention_info == 0:
# Not yet inside the intersection
if p > self.lane_info[0][1]:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * (p - self.lane_info[0][1] + 4 * self.lane_cw)
new_p[2] = 3.1415 / 2
elif p > 0:
beta_temp = p / (5 * self.lane_cw) # rad
delta_x = math.sin(beta_temp) * 5 * self.lane_cw
delta_y = math.cos(beta_temp) * 5 * self.lane_cw
new_p[0] = 1 * (delta_x - 4 * self.lane_cw)
new_p[1] = -1 * (4 * self.lane_cw - delta_y)
new_p[2] = 3.1415 / 2 - beta_temp + 3.1415 / 2
else:
new_p[0] = -1 * (-1 * p + 4 * self.lane_cw)
new_p[1] = 1 * self.lane_cw
new_p[2] = 3.1415
# Go straight
elif intention_info == 1:
new_p[0] = 1 * self.lane_cw
new_p[1] = -1 * p + 4 * self.lane_cw
new_p[2] = 3.1415 / 2
elif i == 7:
# Go straight
if intention_info == 1:
new_p[0] = 3 * self.lane_cw
new_p[1] = -1 * p + 4 * self.lane_cw
new_p[2] = 3.1415 / 2
# Turn right
elif intention_info == 2:
if p > self.lane_info[2][1]:
new_p[0] = 3 * self.lane_cw
new_p[1] = -1 * (p - self.lane_info[2][1] + 4 * self.lane_cw)
new_p[2] = 3.1415 / 2
elif p > 0:
beta_temp = p / self.lane_cw
delta_x = math.sin(beta_temp) * self.lane_cw
delta_y = math.cos(beta_temp) * self.lane_cw
new_p[0] = 1 * (4 * self.lane_cw - delta_x)
new_p[1] = -1 * (4 * self.lane_cw - delta_y)
new_p[2] = beta_temp
else:
new_p[0] = -1 * p + 4 * self.lane_cw
new_p[1] = -3 * self.lane_cw
new_p[2] = 0
elif self.lane_num == 12:
rotation_angle = 3.141593 / 2 * int(i / 3)
p_temp = [0, 0, 0, 0]
if i % 3 == 0:
if p > self.lane_info[0][1]:
yaw = (3.1415 + i * (3.1415 / 6)) % (2 * 3.1415)
p_temp = [p - self.lane_info[0][1] + 6 * self.lane_cw, self.lane_cw, yaw]
elif p > 0:
yaw = (3.1415 + i * (3.1415 / 6)) % (2 * 3.1415)
r_a = (self.lane_info[0][1] - p) / self.lane_info[0][1] * 3.141593 / 2
p0 = [6 * self.lane_cw, self.lane_cw]
p_r = [6 * self.lane_cw, -6 * self.lane_cw]
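# Standard 2-D rotation of the entry point p0 about the arc centre p_r by the angle r_a,
# tracing the turning arc as the vehicle advances.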
p_temp[0] = p_r[0] + (p0[0] - p_r[0]) * np.cos(r_a) - (p0[1] - p_r[1]) * np.sin(r_a)
p_temp[1] = p_r[1] + (p0[1] - p_r[1]) * np.cos(r_a) + (p0[0] - p_r[0]) * np.sin(r_a)
p_temp[2] = yaw + r_a
else:
yaw = (1.5 * 3.1415 + i * (3.1415 / 6)) % (2 * 3.1415)
p_temp = [-self.lane_cw, -6 * self.lane_cw + p, yaw]
elif i % 3 == 1:
yaw = (3.1415 + (i - 1) * (3.1415 / 6)) % (2 * 3.1415)
p_temp = [p - 6 * self.lane_cw, 3 * self.lane_cw, yaw]
else:
if p > self.lane_info[2][1]:
yaw = (3.1415 + (i - 2) * (3.1415 / 6)) % (2 * 3.1415)
p_temp = [p - self.lane_info[2][1] + 6 * self.lane_cw, 5 * self.lane_cw, yaw]
elif p > 0:
yaw = (0.5 * 3.1415 + (i - 2) * (3.1415 / 6)) % (2 * 3.1415)
r_a = (self.lane_info[2][1] - p) / self.lane_info[2][1] * 3.141593 / 2
p0 = [6 * self.lane_cw, 5 * self.lane_cw]
p_r = [6 * self.lane_cw, 6 * self.lane_cw]
# Rotate around point p_r by the angle r_a (clockwise)
p_temp[0] = p_r[0] + (p0[0] - p_r[0]) * np.cos(r_a) + (p0[1] - p_r[1]) * np.sin(r_a)
p_temp[1] = p_r[1] + (p0[1] - p_r[1]) * np.cos(r_a) - (p0[0] - p_r[0]) * np.sin(r_a)
import numpy as nm
from sfepy.base.base import Struct
from sfepy.terms.terms import Term
from sfepy.terms.extmods import terms
class ContactInfo(Struct):
"""
Various contact-related data of contact terms.
"""
pass
class ContactTerm(Term):
r"""
Contact term with a penalty function.
The penalty function is defined as :math:`\varepsilon_N \langle g_N(\ul{u})
\rangle`, where :math:`\varepsilon_N` is the normal penalty parameter and
:math:`\langle g_N(\ul{u}) \rangle` are the Macaulay's brackets of the gap
function :math:`g_N(\ul{u})`.
This term has a dynamic connectivity of DOFs in its region.
:Definition:
.. math::
\int_{\Gamma_{c}} \varepsilon_N \langle g_N(\ul{u}) \rangle \ul{n}
\ul{v}
:Arguments:
- material : :math:`\varepsilon_N`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_contact'
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '.: 1',
'virtual' : ('D', 'state'), 'state' : 'D'}
integration = 'surface'
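# Illustrative usage sketch (assumption, not taken from this file): in a problem
# description the term is typically referenced from an equation string such as
# 'dw_contact.i.Contact(contact.epss, v, u)', with the material 'contact.epss'
# supplying the penalty parameter epsilon_N.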
def __init__(self, *args, **kwargs):
Term.__init__(self, *args, **kwargs)
self.detect = 2
self.ci = None
def call_function(self, fargs):
try:
out, status = self.function(*fargs)
except (RuntimeError, ValueError):
terms.errclear()
raise
if status:
terms.errclear()
raise ValueError('term evaluation failed! (%s)' % self.name)
return out, status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out, status = self.call_function(fargs)
if mode != 'weak':
raise ValueError('unsupported evaluation mode! (%s)' % mode)
return out, status
@staticmethod
def function(out_cc):
return out_cc, 0
def get_fargs(self, epss, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
geo, _ = self.get_mapping(virtual)
region = self.region
if self.ci is None:
self.ci = ContactInfo()
# Uses field connectivity (higher order nodes).
sd = state.field.surface_data[region.name]
ISN = state.field.efaces.T.copy()
nsd = region.dim
ngp = geo.n_qp
neq = state.n_dof
nsn = ISN.shape[0]
fis = region.get_facet_indices()
elementID = fis[:, 0].copy()
segmentID = fis[:, 1].copy()
n = len(elementID)
IEN = state.field.econn
# Need surface bf, bfg corresponding to field approximation here, not
# geo...
H = nm.asfortranarray(geo.bf[0, :, 0, :])
ps = state.field.gel.surface_facet.poly_space
gps, gw = self.integral.get_qp(state.field.gel.surface_facet.name)
bfg = ps.eval_base(gps, diff=True)
dH = nm.asfortranarray(bfg.ravel().reshape(((nsd - 1) * ngp, nsn)))
X = nm.asfortranarray(state.field.coors)
Um = nm.asfortranarray(state().reshape((-1, nsd)))
xx = nm.asfortranarray(X + Um)
import sfepy.mechanics.extmods.ccontres as cc
GPs = nm.empty((n*ngp, 2*nsd+6), dtype=nm.float64, order='F')
longestEdge, GPs = cc.get_longest_edge_and_gps(GPs, neq,
elementID, segmentID,
ISN, IEN, H, xx)
AABBmin, AABBmax = cc.get_AABB(xx, longestEdge, IEN, ISN,
elementID, segmentID, neq)
AABBmin = AABBmin - (0.5*longestEdge)
AABBmax = AABBmax + (0.5*longestEdge)
N = nm.ceil((AABBmax - AABBmin) / (0.5*longestEdge)).astype(nm.int32)
N = nm.ones(nsd, dtype=nm.int32) # BUG workaround.
head, next = cc.init_global_search(N, AABBmin, AABBmax, GPs[:,:nsd])
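# Broad-phase search: the integration points in GPs are binned into a uniform grid over
# the padded bounding box; 'head' and 'next' form per-cell linked lists that
# evaluate_contact_constraints() walks to find nearby candidate segments.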
npd = region.tdim - 1
GPs = cc.evaluate_contact_constraints(GPs, ISN, IEN, N,
AABBmin, AABBmax,
head, next, xx,
elementID, segmentID,
npd, neq, longestEdge)
Gc = nm.zeros(neq, dtype=nm.float64)
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from collections import OrderedDict
import numpy as np
from pyiron_atomistics.atomistics.structure.atoms import Atoms
import warnings
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
def read_atoms(
filename="CONTCAR",
return_velocities=False,
species_list=None,
species_from_potcar=False,
):
"""
Routine to read a static structure from a POSCAR type file
Args:
filename (str): Input filename
return_velocities (bool): True if the predictor corrector velocities are read (only from MD output)
species_list (list/numpy.ndarray): A list of the species (if not present in the POSCAR file or a POTCAR in the
same directory)
species_from_potcar (bool): True if the species list should be read from the POTCAR file in the same directory
Returns:
pyiron.atomistics.structure.atoms.Atoms: The generated structure object
"""
directory = "/".join(filename.split("/")[0:-1])
potcar_file = "/".join([directory, "POTCAR"])
if (species_list is None) and species_from_potcar:
species_list = get_species_list_from_potcar(potcar_file)
if len(species_list) == 0:
warnings.warn("Warning! Unable to read species information from POTCAR")
file_string = list()
with open(filename) as f:
for line in f:
line = line.strip()
file_string.append(line)
return atoms_from_string(
file_string, read_velocities=return_velocities, species_list=species_list
)
def get_species_list_from_potcar(filename="POTCAR"):
"""
Generates the species list from a POTCAR type file
Args:
filename (str): Input filename
Returns:
list: A list of species symbols
"""
trigger = "VRHFIN ="
species_list = list()
with open(filename) as potcar_file:
lines = potcar_file.readlines()
for line in lines:
line = line.strip()
if trigger in line:
str_1 = line.split(trigger)
str_2 = str_1[-1].split(":")
species_list.append(str_2[0].replace(" ", ""))
return species_list
def write_poscar(structure, filename="POSCAR", write_species=True, cartesian=True):
"""
Writes a POSCAR type file from a structure object
Args:
structure (pyiron.atomistics.structure.atoms.Atoms): The structure instance to be written to the POSCAR format
filename (str): Output filename
write_species (bool): True if the species should be written to the file
cartesian (bool): True if the positions are written in Cartesian coordinates
"""
endline = "\n"
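# Example (illustrative): write_poscar(structure, "POSCAR", cartesian=False) writes
# fractional ("Direct") coordinates instead of Cartesian ones.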
with open(filename, "w") as f:
selec_dyn = False
f.write("Poscar file generated with pyiron" + endline)
f.write("1.0" + endline)
for a_i in structure.get_cell():
x, y, z = a_i
f.write("{0:.15f} {1:.15f} {2:.15f}".format(x, y, z) + endline)
atom_numbers = structure.get_number_species_atoms()
if write_species:
f.write(" ".join(atom_numbers.keys()) + endline)
num_str = [str(val) for val in atom_numbers.values()]
f.write(" ".join(num_str))
f.write(endline)
if "selective_dynamics" in structure.get_tags():
selec_dyn = True
f.write("Selective dynamics" + endline)
sorted_coords = list()
selec_dyn_lst = list()
for species in atom_numbers.keys():
indices = structure.select_index(species)
for i in indices:
if cartesian:
sorted_coords.append(structure.positions[i])
else:
sorted_coords.append(structure.get_scaled_positions()[i])
if selec_dyn:
selec_dyn_lst.append(structure.selective_dynamics[i])
if cartesian:
f.write("Cartesian" + endline)
else:
f.write("Direct" + endline)
if selec_dyn:
for i, vec in enumerate(sorted_coords):
x, y, z = vec
sd_string = " ".join(["T" if sd else "F" for sd in selec_dyn_lst[i]])
f.write(
"{0:.15f} {1:.15f} {2:.15f}".format(x, y, z)
+ " "
+ sd_string
+ endline
)
else:
for i, vec in enumerate(sorted_coords):
x, y, z = vec
f.write("{0:.15f} {1:.15f} {2:.15f}".format(x, y, z) + endline)
def atoms_from_string(string, read_velocities=False, species_list=None):
"""
Routine to convert a list of strings read from an input/output structure file into an Atoms instance
Args:
string (list): A list of strings (lines) read from the POSCAR/CONTCAR/CHGCAR/LOCPOT file
read_velocities (bool): True if the velocities from a CONTCAR file should be read (predictor corrector)
species_list (list/numpy.ndarray): A list of species of the atoms
Returns:
pyiron.atomistics.structure.atoms.Atoms: The required structure object
"""
string = [s.strip() for s in string]
string_lower = [s.lower() for s in string]
atoms_dict = dict()
atoms_dict["first_line"] = string[0]
# del string[0]
atoms_dict["selective_dynamics"] = False
atoms_dict["relative"] = False
if "direct" in string_lower or "d" in string_lower:
atoms_dict["relative"] = True
atoms_dict["scaling_factor"] = float(string[1])
unscaled_cell = list()
for i in [2, 3, 4]:
vec = list()
for j in range(3):
vec.append(float(string[i].split()[j]))
unscaled_cell.append(vec)
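# VASP convention: a positive scaling factor multiplies the lattice vectors directly,
# while a negative value is interpreted as the desired cell volume, hence the cube root below.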
if atoms_dict["scaling_factor"] > 0.0:
atoms_dict["cell"] = np.array(unscaled_cell) * atoms_dict["scaling_factor"]
else:
atoms_dict["cell"] = np.array(unscaled_cell) * (
(-atoms_dict["scaling_factor"]) ** (1.0 / 3.0)
)
if "selective dynamics" in string_lower:
atoms_dict["selective_dynamics"] = True
no_of_species = len(string[5].split())
species_dict = OrderedDict()
position_index = 7
if atoms_dict["selective_dynamics"]:
position_index += 1
for i in range(no_of_species):
species_dict["species_" + str(i)] = dict()
try:
species_dict["species_" + str(i)]["count"] = int(string[5].split()[i])
except ValueError:
species_dict["species_" + str(i)]["species"] = string[5].split()[i]
species_dict["species_" + str(i)]["count"] = int(string[6].split()[i])
atoms_dict["species_dict"] = species_dict
if "species" in atoms_dict["species_dict"]["species_0"].keys():
position_index += 1
positions = list()
selective_dynamics = list()
n_atoms = sum(
[
atoms_dict["species_dict"][key]["count"]
for key in atoms_dict["species_dict"].keys()
]
)
try:
for i in range(position_index, position_index + n_atoms):
string_list = np.array(string[i].split())
positions.append([float(val) for val in string_list[0:3]])
if atoms_dict["selective_dynamics"]:
selective_dynamics.append(["T" in val for val in string_list[3:6]])
except (ValueError, IndexError):
raise AssertionError(
"The number of positions given does not match the number of atoms"
)
atoms_dict["positions"] = np.array(positions)
if not atoms_dict["relative"]:
if atoms_dict["scaling_factor"] > 0.0:
atoms_dict["positions"] *= atoms_dict["scaling_factor"]
else:
atoms_dict["positions"] *= (-atoms_dict["scaling_factor"]) ** (1.0 / 3.0)
velocities = list()
try:
atoms = _dict_to_atoms(atoms_dict, species_list=species_list)
except ValueError:
atoms = _dict_to_atoms(atoms_dict, read_from_first_line=True)
if atoms_dict["selective_dynamics"]:
selective_dynamics = np.array(selective_dynamics)
unique_sel_dyn, inverse, counts = np.unique(
selective_dynamics, axis=0, return_counts=True, return_inverse=True
)
count_index = np.argmax(counts)
atoms.add_tag(selective_dynamics=unique_sel_dyn.tolist()[count_index])
is_not_majority = np.arange(len(unique_sel_dyn), dtype=int) != count_index
for i, val in enumerate(unique_sel_dyn):
if is_not_majority[i]:
for key in np.argwhere(inverse == i).flatten():
atoms.selective_dynamics[int(key)] = val.tolist()
if read_velocities:
velocity_index = position_index + n_atoms + 1
for i in range(velocity_index, velocity_index + n_atoms):
try:
velocities.append([float(val) for val in string[i].split()[0:3]])
except IndexError:
break
if not (len(velocities) == n_atoms):
warnings.warn(
"The velocities are either not available or they are incomplete/corrupted. Returning empty "
"list instead",
UserWarning,
)
return atoms, list()
return atoms, velocities
else:
return atoms
def _dict_to_atoms(atoms_dict, species_list=None, read_from_first_line=False):
"""
Function to convert a generated dict into an structure object
Args:
atoms_dict (dict): Dictionary with the details (from string_to_atom)
species_list (list/numpy.ndarray): List of species
read_from_first_line (bool): True if we are to read the species information from the first line in the file
Returns:
pyiron.atomistics.structure.atoms.Atoms: The required structure object
"""
is_absolute = not (atoms_dict["relative"])
positions = atoms_dict["positions"]
cell = atoms_dict["cell"]
symbol = str()
elements = list()
el_list = list()
for i, sp_key in enumerate(atoms_dict["species_dict"].keys()):
if species_list is not None:
try:
el_list = np.array([species_list[i]])
el_list = np.tile(el_list, atoms_dict["species_dict"][sp_key]["count"])
if isinstance(species_list[i], str):
symbol += species_list[i] + str(
atoms_dict["species_dict"][sp_key]["count"]
)
else:
symbol += species_list[i].Abbreviation + str(
atoms_dict["species_dict"][sp_key]["count"]
)
except IndexError:
raise ValueError(
"Number of species in the specified species list does not match that in the file"
)
elif "species" in atoms_dict["species_dict"][sp_key].keys():
el_list = np.array([atoms_dict["species_dict"][sp_key]["species"]])
el_list = np.tile(el_list, atoms_dict["species_dict"][sp_key]["count"])
symbol += atoms_dict["species_dict"][sp_key]["species"]
symbol += str(atoms_dict["species_dict"][sp_key]["count"])
elif read_from_first_line:
if not (
len(atoms_dict["first_line"].split())
== len(atoms_dict["species_dict"].keys())
):
raise AssertionError()
el_list = np.array(atoms_dict["first_line"].split()[i])
el_list = np.tile(el_list, atoms_dict["species_dict"][sp_key]["count"])
symbol += atoms_dict["first_line"].split()[i]
symbol += str(atoms_dict["species_dict"][sp_key]["count"])
elif species_list is None:
raise ValueError(
"Species list should be provided since pyiron can't detect species information"
)
elements.append(el_list)
elements_new = list()
for ele in elements:
for e in ele:
elements_new.append(e)
elements = elements_new
if is_absolute:
atoms = Atoms(elements, positions=positions, cell=cell, pbc=True)
else:
atoms = Atoms(elements, scaled_positions=positions, cell=cell, pbc=True)
return atoms
def vasp_sorter(structure):
"""
Routine to sort the indices of a structure as it would be when written to a POSCAR file
Args:
structure (pyiron.atomistics.structure.atoms.Atoms): The structure whose indices need to be sorted
Returns:
list: A list of indices which is sorted by the corresponding species for writing to POSCAR
"""
atom_numbers = structure.get_number_species_atoms()
sorted_indices = list()
for species in atom_numbers.keys():
indices = structure.select_index(species)
for i in indices:
sorted_indices.append(i)
return np.array(sorted_indices)
def manip_contcar(filename, new_filename, add_pos):
"""
Manipulate a CONTCAR/POSCAR file by adding something to the positions
Args:
filename (str): Filename/path of the input file
new_filename (str): Filename/path of the output file
add_pos (list/numpy.ndarray): Array of values to be added to the positions of the input
"""
actual_struct = read_atoms(filename)
n = 0
direct = True
with open(filename, "r") as f:
lines = f.readlines()
for line in lines:
if "Direct" in line or "Cartesian" in line:
direct = "Direct" in line
break
n += 1
pos_list = list()
sd_list = list()
if len(lines[n + 1].split()) == 6:
for line in lines[n + 1 : n + 1 + len(actual_struct)]:
pos_list.append([float(val) for val in line.split()[0:3]])
sd_list.append(["T" in val for val in line.split()[3:]])
else:
for line in lines[n + 1 : n + 1 + len(actual_struct)]:
pos_list.append([float(val) for val in line.split()[0:3]])
old_pos = np.array(pos_list)
if direct:
add_pos_rel = np.dot(add_pos, np.linalg.inv(actual_struct.cell))
"""
Functions that aid testing in various ways. A typical use would be::
lowcore = create_named_configuration('LOWBD2-CORE')
times = numpy.linspace(-3, +3, 13) * (numpy.pi / 12.0)
frequency = numpy.array([1e8])
channel_bandwidth = numpy.array([1e7])
# Define the component and give it some polarisation and spectral behaviour
f = numpy.array([100.0])
flux = numpy.array([f])
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
compabsdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000')
comp = create_skycomponent(flux=flux, frequency=frequency, direction=compabsdirection,
polarisation_frame=PolarisationFrame('stokesI'))
image = create_test_image(frequency=frequency, phasecentre=phasecentre,
cellsize=0.001,
polarisation_frame=PolarisationFrame('stokesI')
vis = create_visibility(lowcore, times=times, frequency=frequency,
channel_bandwidth=channel_bandwidth,
phasecentre=phasecentre, weight=1,
polarisation_frame=PolarisationFrame('stokesI'),
integration_time=1.0)
"""
import csv
import logging
from typing import List
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
from scipy import interpolate
from data_models.memory_data_models import Configuration, Image, GainTable, Skycomponent, SkyModel, PointingTable
from data_models.parameters import arl_path
from data_models.polarisation import PolarisationFrame
from processing_components.calibration.calibration_control import create_calibration_controls
from processing_components.calibration.operations import create_gaintable_from_blockvisibility, apply_gaintable
from processing_components.image.operations import import_image_from_fits
from processing_components.imaging.base import predict_2d, predict_skycomponent_visibility, \
create_image_from_visibility, advise_wide_field
from processing_components.imaging.primary_beams import create_pb
from processing_components.skycomponent.operations import create_skycomponent, insert_skycomponent, \
apply_beam_to_skycomponent, filter_skycomponents_by_flux
from processing_components.visibility.base import create_blockvisibility, create_visibility
from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility, \
convert_visibility_to_blockvisibility
from processing_library.image.operations import create_image_from_array
log = logging.getLogger(__name__)
def create_test_image(canonical=True, cellsize=None, frequency=None, channel_bandwidth=None,
phasecentre=None, polarisation_frame=PolarisationFrame("stokesI")) -> Image:
"""Create a useful test image
This is the test image M31 widely used in ALMA and other simulations. It is actually part of an Halpha region in
M31.
:param canonical: Make the image into a 4 dimensional image
:param cellsize:
:param frequency: Frequency (array) in Hz
:param channel_bandwidth: Channel bandwidth (array) in Hz
:param phasecentre: Phase centre of image (SkyCoord)
:param polarisation_frame: Polarisation frame
:return: Image
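Example (illustrative sketch)::
    m31 = create_test_image(cellsize=0.001, frequency=numpy.array([1e8]),
                            phasecentre=phasecentre,
                            polarisation_frame=PolarisationFrame('stokesI'))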
"""
if frequency is None:
frequency = [1e8]
im = import_image_from_fits(arl_path("data/models/M31.MOD"))
if canonical:
if polarisation_frame is None:
im.polarisation_frame = PolarisationFrame("stokesI")
elif isinstance(polarisation_frame, PolarisationFrame):
im.polarisation_frame = polarisation_frame
else:
raise ValueError("polarisation_frame is not valid")
im = replicate_image(im, frequency=frequency, polarisation_frame=im.polarisation_frame)
if cellsize is not None:
im.wcs.wcs.cdelt[0] = -180.0 * cellsize / numpy.pi
im.wcs.wcs.cdelt[1] = +180.0 * cellsize / numpy.pi
if frequency is not None:
im.wcs.wcs.crval[3] = frequency[0]
if channel_bandwidth is not None:
im.wcs.wcs.cdelt[3] = channel_bandwidth[0]
else:
if len(frequency) > 1:
im.wcs.wcs.cdelt[3] = frequency[1] - frequency[0]
else:
im.wcs.wcs.cdelt[3] = 0.001 * frequency[0]
im.wcs.wcs.radesys = 'ICRS'
im.wcs.wcs.equinox = 2000.00
if phasecentre is not None:
im.wcs.wcs.crval[0] = phasecentre.ra.deg
im.wcs.wcs.crval[1] = phasecentre.dec.deg
# WCS is 1 relative
im.wcs.wcs.crpix[0] = im.data.shape[3] // 2 + 1
im.wcs.wcs.crpix[1] = im.data.shape[2] // 2 + 1
return im
def create_test_image_from_s3(npixel=16384, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
phasecentre=None, fov=20, flux_limit=1e-3) -> Image:
"""Create MID test image from S3
The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query::
Database: s3_sex
SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);;
Number of rows returned: 29966
For frequencies < 610MHz, there are three tables to use::
data/models/S3_151MHz_10deg.csv, use fov=10
data/models/S3_151MHz_20deg.csv, use fov=20
data/models/S3_151MHz_40deg.csv, use fov=40
For frequencies > 610MHz, there are three tables:
data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3
data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3
data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3
data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3
The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz, and then calculated
for the specified frequencies.
If polarisation_frame is not stokesI then the image will a polarised axis but the values will be zero.
:param npixel: Number of pixels
:param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
:param cellsize: cellsize in radians
:param frequency:
:param channel_bandwidth: Channel width (Hz)
:param phasecentre: phasecentre (SkyCoord)
:param fov: fov 10 | 20 | 40
:param flux_limit: Minimum flux (Jy)
:return: Image
"""
ras = []
decs = []
fluxes = []
if phasecentre is None:
phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
if polarisation_frame is None:
polarisation_frame = PolarisationFrame("stokesI")
npol = polarisation_frame.npol
nchan = len(frequency)
shape = [nchan, npol, npixel, npixel]
w = WCS(naxis=4)
# The negation in the longitude is needed by definition of RA, DEC
w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
w.naxis = 4
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)  # remaining arguments inferred from the surrounding code
#***************************************************#
# This file is part of PFNET. #
# #
# Copyright (c) 2015, <NAME>. #
# #
# PFNET is released under the BSD 2-clause license. #
#***************************************************#
import unittest
import numpy as np
import pfnet as pf
from . import test_cases
from numpy.linalg import norm
from scipy.sparse import coo_matrix,triu,bmat
NUM_TRIALS = 25
EPS = 3.5 # %
TOL = 1e-4
class TestProblem(unittest.TestCase):
def setUp(self):
# Random
np.random.seed(0)
def test_problem_ACOPF_with_function_constraint(self):
for case in test_cases.CASES:
net = pf.Parser(case).parse(case)
self.assertEqual(net.num_periods,1)
p = pf.Problem(net)
for branch in net.branches:
if branch.ratingA == 0.:
branch.ratingA = 100.
# Variables
net.set_flags('bus',
['variable'],
'any',
'voltage magnitude')
net.set_flags('bus',
'variable',
'not slack',
'voltage angle')
net.set_flags('generator',
['variable','bounded'],
'adjustable active power',
'active power')
net.set_flags('generator',
['variable','bounded'],
'regulator',
'reactive power')
net.set_flags('branch',
['variable','bounded'],
'tap changer',
'tap ratio')
net.set_flags('branch',
['variable','bounded'],
'phase shifter',
'phase shift')
self.assertEqual(net.num_vars, (2*net.num_buses - net.get_num_slack_buses() +
net.get_num_P_adjust_gens() +
net.get_num_reg_gens() +
net.get_num_tap_changers() +
net.get_num_phase_shifters()))
self.assertEqual(net.num_bounded,(net.get_num_P_adjust_gens() +
net.get_num_reg_gens() +
net.get_num_tap_changers() +
net.get_num_phase_shifters()))
p.add_constraint(pf.Constraint('AC power balance',net))
p.add_constraint(pf.Constraint('variable bounds',net))
p.add_function(pf.Function('generation cost',1.,net))
func = pf.Function('generation cost',1.,net)
constr = pf.Constraint('constrained function',net)
constr.set_parameter('func',func)
constr.set_parameter('op','>=')
constr.set_parameter('rhs',0.)
p.add_constraint(constr)
net.set_flags('bus',
'bounded',
'any',
'voltage magnitude')
self.assertEqual(net.num_bounded,(net.get_num_P_adjust_gens() +
net.get_num_reg_gens() +
net.get_num_tap_changers() +
net.get_num_phase_shifters() +
net.num_buses))
p.analyze()
# Extra vars
self.assertEqual(p.num_extra_vars,1)
# Init point
x0 = p.get_init_point()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars+1,))
p.eval(x0)
phi = p.phi
gphi = p.gphi.copy()
Hphi = p.Hphi.copy()
f = p.f.copy()
b = p.b.copy()
A = p.A.copy()
J = p.J.copy()
G = p.G.copy()
l = p.l.copy()
u = p.u.copy()
# Numbers
self.assertEqual(x0.size,p.num_primal_variables)
self.assertEqual(A.shape[0],p.num_linear_equality_constraints)
self.assertEqual(f.size,p.num_nonlinear_equality_constraints)
# phi
self.assertTrue(type(phi) is float)
self.assertGreaterEqual(phi,0.)
# gphi
self.assertTrue(type(gphi) is np.ndarray)
self.assertTupleEqual(gphi.shape,(net.num_vars+1,))
# Hphi
self.assertTrue(type(Hphi) is coo_matrix)
self.assertTupleEqual(Hphi.shape,(net.num_vars+1,net.num_vars+1))
self.assertGreater(Hphi.nnz,0)
# f
self.assertTrue(type(f) is np.ndarray)
f_size = sum(c.f.shape[0] for c in p.constraints)
self.assertTupleEqual(f.shape,(f_size,))
# b
self.assertTrue(type(b) is np.ndarray)
b_size = sum(c.b.shape[0] for c in p.constraints)
self.assertTupleEqual(b.shape,(b_size,))
# J
self.assertTrue(type(J) is coo_matrix)
J_size = sum([c.J.shape[0] for c in p.constraints])
J_nnz = sum([c.J.nnz for c in p.constraints])
self.assertTupleEqual(J.shape,(J_size,net.num_vars+1))
self.assertEqual(J.nnz,J_nnz)
# G, l, u
self.assertTrue(type(G) is coo_matrix)
G_size = sum([c.G.shape[0] for c in p.constraints])
G_nnz = sum([c.G.nnz for c in p.constraints])
self.assertTupleEqual(G.shape,(G_size,net.num_vars+1))
self.assertEqual(G.nnz,G_nnz)
self.assertEqual(l.size,G_size)
self.assertEqual(u.size,G_size)
self.assertFalse(np.any(np.isnan(l)))
self.assertFalse(np.any(np.isnan(u)))
self.assertFalse(np.any(np.isnan(G.data)))
# A
self.assertTrue(type(A) is coo_matrix)
A_size = sum(c.A.shape[0] for c in p.constraints)
A_nnz = sum(c.A.nnz for c in p.constraints)
self.assertTupleEqual(A.shape,(A_size,net.num_vars+1))
self.assertEqual(A.nnz,A_nnz)
def test_problem_with_heur_error(self):
for case in test_cases.CASES:
net = pf.Parser(case).parse(case)
self.assertEqual(net.num_periods,1)
p = pf.Problem(net)
p.add_heuristic(pf.Heuristic('PVPQ switching', net))
p.analyze()
self.assertRaises(pf.ProblemError, p.apply_heuristics, net.get_var_values())
def test_problem_LSNR(self):
# Constants
h = 1e-9
for case in test_cases.CASES:
net = pf.Parser(case).parse(case)
self.assertEqual(net.num_periods,1)
p = pf.Problem(net)
# Variables
net.set_flags('bus',
'variable',
'not slack',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'variable',
'slack',
'active power')
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
net.set_flags('branch',
'variable',
'tap changer - v',
'tap ratio')
net.set_flags('branch',
'variable',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'variable',
'switching - v',
'susceptance')
self.assertEqual(net.num_vars,
2*(net.num_buses-net.get_num_slack_buses()) +
net.get_num_slack_gens() +
net.get_num_reg_gens() +
net.get_num_tap_changers_v() +
net.get_num_phase_shifters() +
net.get_num_switched_v_shunts())
# Fixed
net.set_flags('branch',
'fixed',
'tap changer - v',
'tap ratio')
net.set_flags('branch',
'fixed',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'fixed',
'switching - v',
'susceptance')
self.assertEqual(net.num_fixed,
net.get_num_tap_changers_v() +
net.get_num_phase_shifters() +
net.get_num_switched_v_shunts())
# Constraints
p.add_constraint(pf.Constraint('AC power balance', net))
p.add_constraint(pf.Constraint('generator active power participation', net))
p.add_constraint(pf.Constraint('PVPQ switching', net))
p.add_constraint(pf.Constraint('variable fixing', net))
self.assertEqual(len(p.constraints), 4)
# Heuristics
p.add_heuristic(pf.Heuristic('PVPQ switching', net))
self.assertEqual(len(p.heuristics), 1)
# Check adding redundant constraints
p.add_constraint(pf.Constraint('generator active power participation',net))
self.assertEqual(len(p.constraints),4)
# Functions
self.assertEqual(len(p.functions),0)
# Init point
x0 = p.get_init_point()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars,))
self.assertTrue(np.all(x0 == p.x))
# Before
phi = p.phi
gphi = p.gphi
Hphi = p.Hphi
f = p.f
b = p.b
A = p.A
J = p.J
self.assertTrue(type(phi) is float)
self.assertEqual(phi,0.)
self.assertTrue(type(gphi) is np.ndarray)
self.assertTupleEqual(gphi.shape,(0,))
self.assertTrue(type(f) is np.ndarray)
self.assertTupleEqual(f.shape,(0,))
self.assertTrue(type(b) is np.ndarray)
self.assertTupleEqual(b.shape,(0,))
self.assertTrue(type(J) is coo_matrix)
self.assertTupleEqual(J.shape,(0,0))
self.assertEqual(J.nnz,0)
self.assertTrue(type(A) is coo_matrix)
self.assertTupleEqual(A.shape,(0,0))
self.assertEqual(A.nnz,0)
self.assertTrue(type(Hphi) is coo_matrix)
self.assertTupleEqual(Hphi.shape,(0,0))
self.assertEqual(Hphi.nnz,0)
self.assertTrue(np.all(Hphi.row >= Hphi.col))
p.analyze()
p.eval(x0)
# After
phi = p.phi
gphi = p.gphi.copy()
Hphi = p.Hphi.copy()
f = p.f.copy()
b = p.b.copy()
A = p.A.copy()
J = p.J.copy()
# Numbers
self.assertEqual(x0.size,p.num_primal_variables)
self.assertEqual(A.shape[0],p.num_linear_equality_constraints)
self.assertEqual(f.size,p.num_nonlinear_equality_constraints)
self.assertEqual(p.num_primal_variables,p.get_num_primal_variables())
self.assertEqual(p.num_linear_equality_constraints,p.get_num_linear_equality_constraints())
self.assertEqual(p.num_nonlinear_equality_constraints,p.get_num_nonlinear_equality_constraints())
# phi
self.assertTrue(type(phi) is float)
self.assertEqual(phi,0.)
# gphi
self.assertTrue(type(gphi) is np.ndarray)
self.assertTupleEqual(gphi.shape,(net.num_vars,))
self.assertLess(norm(gphi),1e-10)
# Hphi
self.assertTrue(type(Hphi) is coo_matrix)
self.assertTupleEqual(Hphi.shape,(net.num_vars,net.num_vars))
self.assertEqual(Hphi.nnz,0)
# f
self.assertTrue(type(f) is np.ndarray)
f_size = sum(c.f.shape[0] for c in p.constraints)
self.assertTupleEqual(f.shape,(f_size,))
# b
self.assertTrue(type(b) is np.ndarray)
b_size = sum(c.b.shape[0] for c in p.constraints)
self.assertTupleEqual(b.shape,(b_size,))
# J
self.assertTrue(type(J) is coo_matrix)
J_size = sum(c.J.shape[0] for c in p.constraints)
self.assertTupleEqual(J.shape,(J_size,net.num_vars))
self.assertGreater(J.nnz,0)
# A
self.assertTrue(type(A) is coo_matrix)
A_size = sum(c.A.shape[0] for c in p.constraints)
self.assertTupleEqual(A.shape,(A_size,net.num_vars))
self.assertGreater(A.nnz,0)
# Check J
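            # Finite-difference check of the constraint Jacobian: for random directions d,
            # J(x0)*d should agree with (f(x0 + h*d) - f(x0))/h to within EPS percent.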
f0 = f.copy()
J0 = J.copy()
for i in range(NUM_TRIALS):
d = np.random.randn(net.num_vars)
x = x0 + h*d
p.eval(x)
f1 = p.f
Jd_exact = J0*d
Jd_approx = (f1-f0)/h
error = 100.*norm(Jd_exact-Jd_approx)/np.maximum(norm(Jd_exact),TOL)
self.assertLessEqual(error,EPS)
# Check Hcombined
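            # Finite-difference check of the combined Hessian: H_combined approximates the
            # coefficient-weighted sum of constraint Hessians, so H0*d should agree with
            # (J(x0 + h*d)^T*coeff - J(x0)^T*coeff)/h for random directions d.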
coeff = np.random.randn(f.shape[0])
p.eval(x0)
self.assertRaises(pf.ProblemError,p.combine_H,np.zeros(f.shape[0]+1),False)
p.combine_H(coeff,False)
J0 = p.J.copy()
g0 = J0.T*coeff
H0 = p.H_combined.copy()
self.assertTrue(type(H0) is coo_matrix)
self.assertTupleEqual(H0.shape,(net.num_vars,net.num_vars))
self.assertTrue(np.all(H0.row >= H0.col)) # lower triangular
H0 = (H0 + H0.T) - triu(H0)
for i in range(NUM_TRIALS):
d = np.random.randn(net.num_vars)
x = x0 + h*d
p.eval(x)
g1 = p.J.T*coeff
Hd_exact = H0*d
Hd_approx = (g1-g0)/h
error = 100.*norm(Hd_exact-Hd_approx)/np.maximum(norm(Hd_exact),TOL)
self.assertLessEqual(error,EPS)
# Sensitivities
net.clear_sensitivities()
for i in range(net.num_buses):
bus = net.get_bus(i)
self.assertEqual(bus.sens_P_balance,0.)
self.assertEqual(bus.sens_Q_balance,0.)
sens = np.random.randn(p.f.size)
offset = 0
for c in p.constraints:
if c.name == 'AC power balance':
break
else:
offset += c.f.size
p.store_sensitivities(np.zeros(p.A.shape[0]),sens,None,None)
for i in range(net.num_buses):
bus = net.get_bus(i)
self.assertEqual(bus.sens_P_balance,sens[bus.index_P+offset])
self.assertEqual(bus.sens_Q_balance,sens[bus.index_Q+offset])
self.assertRaises(pf.ProblemError,
p.store_sensitivities,
np.zeros(p.A.shape[0]),
np.zeros(p.f.size+5),
None,
None)
# Heuristics
self.assertEqual(len(p.heuristics), 1)
self.assertEqual(p.heuristics[0].name, "PVPQ switching")
p.apply_heuristics(x0)
def test_problem_vPF(self):
# Constants
h = 1e-9
for case in test_cases.CASES:
net = pf.Parser(case).parse(case)
self.assertEqual(net.num_periods,1)
p = pf.Problem(net)
# Variables
net.set_flags('bus',
'variable',
'not slack',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'variable',
'slack',
'active power')
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
net.set_flags('branch',
'variable',
'tap changer - v',
['tap ratio'])
net.set_flags('shunt',
'variable',
'switching - v',
['susceptance'])
reg_by_tran_or_shunt = 0
for i in range(net.num_buses):
bus = net.get_bus(i)
if bus.is_regulated_by_tran() or bus.is_regulated_by_shunt():
reg_by_tran_or_shunt += 1
self.assertEqual(net.num_vars,
2*(net.num_buses-net.get_num_slack_buses()) +
net.get_num_slack_gens() +
net.get_num_reg_gens() +
net.get_num_tap_changers_v()+
net.get_num_switched_v_shunts())
# Constraints
p.add_constraint(pf.Constraint('AC power balance',net))
p.add_constraint(pf.Constraint('voltage set point regulation',net))
p.add_constraint(pf.Constraint('voltage regulation by transformers',net))
p.add_constraint(pf.Constraint('voltage regulation by shunts',net))
self.assertEqual(len(p.constraints),4)
# Check adding redundant constraints
p.add_constraint(pf.Constraint('AC power balance',net))
self.assertEqual(len(p.constraints),4)
# Functions
p.add_function(pf.Function('voltage magnitude regularization',1.,net))
p.add_function(pf.Function('voltage angle regularization',5.,net))
p.add_function(pf.Function('generator powers regularization',8.,net))
p.add_function(pf.Function('tap ratio regularization',3.,net))
p.add_function(pf.Function('susceptance regularization',1.,net))
self.assertEqual(len(p.functions),5)
# Before
phi = p.phi
gphi = p.gphi
Hphi = p.Hphi
f = p.f
b = p.b
A = p.A
J = p.J
self.assertTrue(type(phi) is float)
self.assertEqual(phi,0.)
self.assertTrue(type(gphi) is np.ndarray)
self.assertTupleEqual(gphi.shape,(0,))
self.assertTrue(type(f) is np.ndarray)
self.assertTupleEqual(f.shape,(0,))
self.assertTrue(type(b) is np.ndarray)
self.assertTupleEqual(b.shape,(0,))
self.assertTrue(type(J) is coo_matrix)
self.assertTupleEqual(J.shape,(0,0))
self.assertEqual(J.nnz,0)
self.assertTrue(type(A) is coo_matrix)
self.assertTupleEqual(A.shape,(0,0))
self.assertEqual(A.nnz,0)
self.assertTrue(type(Hphi) is coo_matrix)
self.assertTupleEqual(Hphi.shape,(0,0))
self.assertEqual(Hphi.nnz,0)
self.assertTrue(np.all(Hphi.row >= Hphi.col))
p.analyze()
# Init point
r = np.random.randn(p.get_num_primal_variables())
x0 = p.get_init_point()+r
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars+p.num_extra_vars,))
self.assertTrue(np.all(x0 == p.x+r))
p.eval(x0)
# After
phi = p.phi
gphi = p.gphi.copy()
Hphi = p.Hphi.copy()
f = p.f.copy()
b = p.b.copy()
A = p.A.copy()
J = p.J.copy()
# Numbers
self.assertEqual(x0.size,p.num_primal_variables)
self.assertEqual(A.shape[0],p.num_linear_equality_constraints)
self.assertEqual(f.size,p.num_nonlinear_equality_constraints)
# phi
self.assertTrue(type(phi) is float)
self.assertGreater(phi,0.)
man_phi = sum(f.weight*f.phi for f in p.functions)
self.assertLess(np.abs(man_phi-phi),1e-10)
# gphi
self.assertTrue(type(gphi) is np.ndarray)
self.assertTupleEqual(gphi.shape,(net.num_vars+p.num_extra_vars,))
man_gphi = sum(f.weight*f.gphi for f in p.functions)
self.assertLess(norm(np.hstack((man_gphi,np.zeros(p.num_extra_vars)))-gphi),1e-10)
# Hphi
self.assertTrue(type(Hphi) is coo_matrix)
self.assertTupleEqual(Hphi.shape,(net.num_vars+p.num_extra_vars,
net.num_vars+p.num_extra_vars))
self.assertGreater(Hphi.nnz,0)
# f
self.assertTrue(type(f) is np.ndarray)
f_size = sum(c.f.shape[0] for c in p.constraints)
self.assertTupleEqual(f.shape,(f_size,))
# b
self.assertTrue(type(b) is np.ndarray)
b_size = sum(c.b.shape[0] for c in p.constraints)
self.assertTupleEqual(b.shape,(b_size,))
# J
self.assertTrue(type(J) is coo_matrix)
J_size = sum(c.J.shape[0] for c in p.constraints)
self.assertTupleEqual(J.shape,(J_size,net.num_vars+p.num_extra_vars))
self.assertGreater(J.nnz,0)
# A
self.assertTrue(type(A) is coo_matrix)
A_size = sum(c.A.shape[0] for c in p.constraints)
self.assertTupleEqual(A.shape,(A_size,net.num_vars+p.num_extra_vars))
self.assertGreater(A.nnz,0)
# Check gphi
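            # Finite-difference check of the objective gradient: gphi(x0) dotted with a random
            # direction d should agree with (phi(x0 + h*d) - phi(x0))/h.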
phi0 = phi
gphi0 = gphi.copy()
for i in range(NUM_TRIALS):
                d = np.random.randn(net.num_vars+p.num_extra_vars)
"""
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
"""
import numpy as np
import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import permutations, chain
from math import factorial
from os import path
def my_softmax(input, axis=1):
trans_input = input.transpose(axis, 0).contiguous()
soft_max_1d = F.softmax(trans_input, dim=0) # added dim=0 as implicit choice is deprecated, dim 0 is edgetype due to transpose
return soft_max_1d.transpose(axis, 0)
def binary_concrete(logits, tau=1, hard=False, eps=1e-10):
y_soft = binary_concrete_sample(logits, tau=tau, eps=eps)
if hard:
y_hard = (y_soft > 0.5).float()
y = Variable(y_hard.data - y_soft.data) + y_soft
else:
y = y_soft
return y
def binary_concrete_sample(logits, tau=1, eps=1e-10):
logistic_noise = sample_logistic(logits.size(), eps=eps)
if logits.is_cuda:
logistic_noise = logistic_noise.cuda()
y = logits + Variable(logistic_noise)
return F.sigmoid(y / tau)
def sample_logistic(shape, eps=1e-10):
uniform = torch.rand(shape).float()
return torch.log(uniform + eps) - torch.log(1 - uniform + eps)
def sample_gumbel(shape, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = torch.rand(shape).float()
return - torch.log(eps - torch.log(U + eps))
def gumbel_softmax_sample(logits, tau=1, eps=1e-10):
"""
    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
gumbel_noise = sample_gumbel(logits.size(), eps=eps)
if logits.is_cuda:
gumbel_noise = gumbel_noise.cuda()
y = logits + Variable(gumbel_noise)
return my_softmax(y / tau, axis=-1)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
"""
    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
tau: non-negative scalar temperature
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
Constraints:
- this implementation only works on batch_size x num_features tensor for now
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros(*shape)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
y = Variable(y_hard - y_soft.data) + y_soft
else:
y = y_soft
return y
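# Minimal usage sketch (illustrative only; the tensor sizes are arbitrary): with hard=False the
# sample is a soft distribution over classes, with hard=True it is one-hot in the forward pass
# while gradients flow through the soft sample (straight-through estimator).
def _example_gumbel_softmax_usage():
    logits = torch.randn(8, 3)                            # [batch_size, n_class]
    y_soft = gumbel_softmax(logits, tau=0.5, hard=False)  # rows sum to 1
    y_hard = gumbel_softmax(logits, tau=0.5, hard=True)   # one-hot rows
    return y_soft, y_hard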
def my_sigmoid(logits, hard=True, sharpness=1.0):
edges_soft = 1/(1+torch.exp(-sharpness*logits))
if hard:
edges_hard = torch.round(edges_soft)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
if edges_soft.is_cuda:
edges_hard = edges_hard.cuda()
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
edges = Variable(edges_hard - edges_soft.data) + edges_soft
else:
edges = edges_soft
return edges
def binary_accuracy(output, labels):
preds = output > 0.5
correct = preds.type_as(labels).eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def edge_type_encode(edges): # this is used to give each 'interaction strength' a unique integer = 0, 1, 2 ..
unique = np.unique(edges)
encode = np.zeros(edges.shape)
for i in range(unique.shape[0]):
encode += np.where( edges == unique[i], i, 0)
return encode
def loader_edges_encode(edges, num_atoms):
edges = np.reshape(edges, [edges.shape[0], edges.shape[1], num_atoms ** 2])
edges = np.array(edge_type_encode(edges), dtype=np.int64)
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
[num_atoms, num_atoms])
edges = edges[:,:, off_diag_idx]
return edges
def loader_combine_edges(edges):
edge_types_list = [ int(np.max(edges[:,i,:]))+1 for i in range(edges.shape[1]) ]
assert( edge_types_list == sorted(edge_types_list)[::-1] )
encoded_target = np.zeros( edges[:,0,:].shape )
base = 1
for i in reversed(range(edges.shape[1])):
encoded_target += base*edges[:,i,:]
base *= edge_types_list[i]
return encoded_target.astype('int')
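# Worked example (illustrative, with made-up shapes): two factor graphs with 3 and 2 edge types
# are combined into a single label in {0,...,5} via a mixed-radix encoding, e.g. the label pair
# (2, 1) becomes 2*2 + 1 = 5.
def _example_loader_combine_edges():
    edges = np.zeros((1, 2, 4), dtype=np.int64)
    edges[0, 0, :] = [0, 1, 2, 2]   # first factor graph, 3 edge types
    edges[0, 1, :] = [0, 1, 0, 1]   # second factor graph, 2 edge types
    return loader_combine_edges(edges)   # -> array([[0, 3, 4, 5]])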
def load_data_NRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode(edges_train, num_atoms)
edges_valid = loader_edges_encode(edges_valid, num_atoms)
edges_test = loader_edges_encode(edges_test, num_atoms)
edges_train = loader_combine_edges(edges_train)
edges_valid = loader_combine_edges(edges_valid)
edges_test = loader_combine_edges(edges_test)
feat_train = torch.FloatTensor(feat_train)
edges_train = torch.LongTensor(edges_train)
feat_valid = torch.FloatTensor(feat_valid)
edges_valid = torch.LongTensor(edges_valid)
feat_test = torch.FloatTensor(feat_test)
edges_test = torch.LongTensor(edges_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def load_data_fNRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode( edges_train, num_atoms )
edges_valid = loader_edges_encode( edges_valid, num_atoms )
edges_test = loader_edges_encode( edges_test, num_atoms )
edges_train = torch.LongTensor(edges_train)
edges_valid = torch.LongTensor(edges_valid)
edges_test = torch.LongTensor(edges_test)
feat_train = torch.FloatTensor(feat_train)
feat_valid = torch.FloatTensor(feat_valid)
feat_test = torch.FloatTensor(feat_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
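# Minimal usage sketch (the folder name below is a placeholder, not a dataset shipped with this
# code): both loaders expect loc_*/vel_*/edges_*.npy files under <data_folder>/<sim_folder>/ and
# return DataLoaders of (features, edge labels) plus the normalisation constants, e.g.
#   train_loader, valid_loader, test_loader, loc_max, loc_min, vel_max, vel_min = \
#       load_data_fNRI(batch_size=128, sim_folder='my_sim', data_folder='data')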
def to_2d_idx(idx, num_cols):
idx = np.array(idx, dtype=np.int64)
y_idx = np.array(np.floor(idx / float(num_cols)), dtype=np.int64)
x_idx = idx % num_cols
return x_idx, y_idx
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def get_triu_indices(num_nodes):
"""Linear triu (upper triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
triu_indices = (ones.triu() - eye).nonzero().t()
triu_indices = triu_indices[0] * num_nodes + triu_indices[1]
return triu_indices
def get_tril_indices(num_nodes):
"""Linear tril (lower triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
tril_indices = (ones.tril() - eye).nonzero().t()
tril_indices = tril_indices[0] * num_nodes + tril_indices[1]
return tril_indices
def get_offdiag_indices(num_nodes):
"""Linear off-diagonal indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
offdiag_indices = (ones - eye).nonzero().t()
offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]
return offdiag_indices
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
triu_idx = triu_idx[get_offdiag_indices(num_nodes)]
return triu_idx.nonzero()
def get_tril_offdiag_indices(num_nodes):
"""Linear tril (lower) indices w.r.t. vector of off-diagonal elements."""
tril_idx = torch.zeros(num_nodes * num_nodes)
tril_idx[get_tril_indices(num_nodes)] = 1.
tril_idx = tril_idx[get_offdiag_indices(num_nodes)]
return tril_idx.nonzero()
def get_minimum_distance(data):
data = data[:, :, :, :2].transpose(1, 2)
data_norm = (data ** 2).sum(-1, keepdim=True)
dist = data_norm + \
data_norm.transpose(2, 3) - \
2 * torch.matmul(data, data.transpose(2, 3))
min_dist, _ = dist.min(1)
return min_dist.view(min_dist.size(0), -1)
def get_buckets(dist, num_buckets):
dist = dist.cpu().data.numpy()
min_dist = np.min(dist)
max_dist = np.max(dist)
bucket_size = (max_dist - min_dist) / num_buckets
thresholds = bucket_size * np.arange(num_buckets)
bucket_idx = []
for i in range(num_buckets):
if i < num_buckets - 1:
idx = np.where(np.all(np.vstack((dist > thresholds[i],
dist <= thresholds[i + 1])), 0))[0]
else:
idx = np.where(dist > thresholds[i])[0]
bucket_idx.append(idx)
return bucket_idx, thresholds
def get_correct_per_bucket(bucket_idx, pred, target):
pred = pred.cpu().numpy()[:, 0]
target = target.cpu().data.numpy()
correct_per_bucket = []
for i in range(len(bucket_idx)):
preds_bucket = pred[bucket_idx[i]]
target_bucket = target[bucket_idx[i]]
correct_bucket = np.sum(preds_bucket == target_bucket)
correct_per_bucket.append(correct_bucket)
return correct_per_bucket
def get_correct_per_bucket_(bucket_idx, pred, target):
pred = pred.cpu().numpy()
target = target.cpu().data.numpy()
correct_per_bucket = []
for i in range(len(bucket_idx)):
preds_bucket = pred[bucket_idx[i]]
target_bucket = target[bucket_idx[i]]
correct_bucket = np.sum(preds_bucket == target_bucket)
correct_per_bucket.append(correct_bucket)
return correct_per_bucket
def kl_categorical(preds, log_prior, num_atoms, eps=1e-16):
kl_div = preds * (torch.log(preds + eps) - log_prior)
return kl_div.sum() / (num_atoms * preds.size(0)) # normalisation here is (batch * num atoms)
def kl_categorical_uniform(preds, num_atoms, num_edge_types, add_const=False,
eps=1e-16):
kl_div = preds * torch.log(preds + eps)
if add_const:
const = np.log(num_edge_types)
kl_div += const
return kl_div.sum() / (num_atoms * preds.size(0))
def kl_categorical_uniform_var(preds, num_atoms, num_edge_types, add_const=False,
eps=1e-16):
kl_div = preds * torch.log(preds + eps)
if add_const:
const = np.log(num_edge_types)
kl_div += const
return (kl_div.sum(dim=1) / num_atoms).var()
def nll_gaussian(preds, target, variance, add_const=False):
neg_log_p = ((preds - target) ** 2 / (2 * variance))
if add_const:
const = 0.5 * np.log(2 * np.pi * variance)
neg_log_p += const
return neg_log_p.sum() / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
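# Note on the two Gaussian NLL helpers above and below: the per-element loss is
# (preds - target)^2 / (2*variance) [+ 0.5*log(2*pi*variance) when add_const=True], i.e. the
# negative log-likelihood of a fixed-variance Gaussian, normalised by (batch size * num atoms).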
def nll_gaussian_var(preds, target, variance, add_const=False):
# returns the variance over the batch of the reconstruction loss
neg_log_p = ((preds - target) ** 2 / (2 * variance))
if add_const:
const = 0.5 * np.log(2 * np.pi * variance)
neg_log_p += const
return (neg_log_p.sum(dim=1)/target.size(1)).var()
# Loss function for the case of variable sigma, input sigma must have same shape as preds i.e. [batchsize, no. of atoms, no. of time steps, no. of phase space coords (x,y,vx,vy)]
def nll_gaussian_variablesigma(preds, target, logsigma, add_const=True):
# cutoff to ensure it does not become infinite
if (torch.min(logsigma) < -pow(10, 7)):
accuracy = np.full(
(logsigma.size(0), logsigma.size(1), logsigma.size(2), logsigma.size(3)),
-pow(10, 7), dtype=np.float32)
accuracy = torch.from_numpy(accuracy)
if preds.is_cuda:
accuracy = accuracy.cuda()
logsigma = torch.max(logsigma, accuracy)
neg_log_p = (((preds - target) ** 2 )*torch.exp(-logsigma)) / 2
# neg_log_p = ((preds - target) ** 2 - 0.0000001 / (2 * variance))
# neg_log_p = ((preds - target) ** 2 / (2 * variance))- 0.0000001/ sigma
loss_1 = neg_log_p
loss_2 = 0.0
if add_const:
const = (0.5 * logsigma)
neg_log_p = neg_log_p + const
loss_2 += const
return neg_log_p.sum() / (target.size(0) * target.size(1)), loss_1.sum() / (target.size(0) * target.size(1)) , loss_2.sum() / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
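# Minimal shape sketch (illustrative; the sizes are arbitrary): preds, target and logsigma must all
# be [batch, atoms, timesteps, 4] tensors; the function returns the total loss plus its squared-error
# and log-sigma components separately.
def _example_nll_gaussian_variablesigma():
    preds = torch.randn(2, 5, 10, 4)
    target = torch.randn(2, 5, 10, 4)
    logsigma = torch.zeros(2, 5, 10, 4)   # i.e. sigma = 1 everywhere
    return nll_gaussian_variablesigma(preds, target, logsigma, add_const=True)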
def nll_gaussian_var__variablesigma(preds, target, logsigma, add_const=True):
# returns the variance over the batch of the reconstruction loss
# cutoff to ensure it does not become infinite
if (torch.min(logsigma) < -pow(10, 7)):
accuracy = np.full(
(logsigma.size(0), logsigma.size(1), logsigma.size(2), logsigma.size(3)),
-pow(10, 7), dtype=np.float32)
accuracy = torch.from_numpy(accuracy)
if preds.is_cuda:
accuracy = accuracy.cuda()
logsigma = torch.max(logsigma, accuracy)
neg_log_p = (((preds - target) ** 2) * torch.exp(-logsigma)) / 2
# neg_log_p = ((preds - target) ** 2 - 0.0000001 / (2 * variance))
# neg_log_p = ((preds - target) ** 2 / (2 * variance))- 0.0000001/ sigma
if add_const:
const = (0.5 * logsigma)
neg_log_p = neg_log_p + const
return (neg_log_p.sum(dim=1)/target.size(1)).var()
# Loss function for the case of variable sigma Laplace distribution, input sigma must have same shape as preds i.e. [batchsize, no. of atoms, no. of time steps, no. of phase space coords (x,y,vx,vy)]
def nll_laplace_variablesigma(preds, target, sigma, add_const=True):
variance = sigma ** 2
neg_log_p = torch.abs((preds - target)) / sigma
loss_1 = neg_log_p
loss_2 = 0.0
if add_const:
const = (torch.log(2* sigma))
neg_log_p = neg_log_p + const
loss_2 += const
return neg_log_p.sum() / (target.size(0) * target.size(1)), loss_1.sum() / (target.size(0) * target.size(1)) , loss_2 / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
def nll_laplace_var__variablesigma(preds, target, sigma, add_const=True):
# returns the variance over the batch of the reconstruction loss
variance = sigma ** 2
neg_log_p = torch.abs((preds - target)) / sigma
loss_1 = neg_log_p
loss_2 = 0.0
if add_const:
const = (torch.log(2 * sigma))
neg_log_p = neg_log_p + const
loss_2 += const
return (neg_log_p.sum(dim=1)/target.size(1)).var()
# Loss function for the case of variable sigma Student's distribution, input sigma must have same shape as preds i.e. [batchsize, no. of atoms, no. of time steps, no. of phase space coords (x,y,vx,vy)]
def nll_students_variablesigma(preds, target, sigma, add_const=True):
sigmasquared = sigma ** 2
neg_log_p = torch.log((preds - target) ** 2 + sigmasquared)
loss_1 = neg_log_p
loss_2 = 0.0
if add_const:
const = -(torch.log(sigma))
neg_log_p = neg_log_p + const
loss_2 += const
return neg_log_p.sum() / (target.size(0) * target.size(1)), loss_1.sum() / (target.size(0) * target.size(1)) , loss_2 / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
def nll_students_var__variablesigma(preds, target, sigma, add_const=True):
# returns the variance over the batch of the reconstruction loss
sigmasquared = sigma ** 2
neg_log_p = torch.log((preds - target) ** 2 + sigmasquared)
loss_1 = neg_log_p
loss_2 = 0.0
if add_const:
const = -(torch.log(sigma))
neg_log_p = neg_log_p + const
loss_2 += const
return (neg_log_p.sum(dim=1)/target.size(1)).var()
# Loss function for the case of variable sigma multivariate case, input sigma must have same shape as preds i.e. [batchsize, no. of atoms, no. of time steps, no. of phase space coords (x,y,vx,vy)]
def nll_gaussian_multivariatesigma(preds, target, sigma, accel, add_const=True):
# get normalised vectors for acceleration and velocities v|| and a||
indices = torch.LongTensor([2,3])
if preds.is_cuda:
indices = indices.cuda()
velocities = torch.index_select(preds, 3, indices)
velnorm = velocities.norm(p=2, dim = 3, keepdim = True)
normalisedvel = velocities.div(velnorm.expand_as(velocities))
accelnorm = accel.norm(p=2, dim = 3, keepdim = True)
normalisedaccel = accel.div(accelnorm.expand_as(accel))
# get perpendicular components to the accelerations and velocities accelperp, velperp
# note in 2D perpendicular vector is just rotation by pi/2 about origin (x,y) -> (-y,x)
rotationmatrix = np.zeros((velocities.size(0), velocities.size(1), velocities.size(2),2,2), dtype=np.float32)
for i in range(len(rotationmatrix)):
for j in range(len(rotationmatrix[i])):
for l in range(len(rotationmatrix[i][j])):
rotationmatrix[i][j][l][0][1] = np.float32(-1)
rotationmatrix[i][j][l][1][0] = np.float32(1)
rotationmatrix = torch.from_numpy(rotationmatrix)
if preds.is_cuda:
rotationmatrix = rotationmatrix.cuda()
velperp = torch.matmul(rotationmatrix, normalisedvel.unsqueeze(4))
velperp = velperp.squeeze()
accelperp = torch.matmul(rotationmatrix, normalisedaccel.unsqueeze(4))
accelperp = accelperp.squeeze()
# need Sigma=Sigma^2, Sigma^-1 and det(Sigma)
variance = sigma ** 2
determinant = torch.prod(variance, 3).unsqueeze(3)
inversevariance = variance ** -1
# in order for us to use simple methods need 1*4, 4*4, 4*4, 4*4, 4*1 tensors for each batch etc.
differences = preds-target
differencestranspose = differences.unsqueeze(3) # (x-mu)^T
differences = differences.unsqueeze(4) # (x-mu)
sigmadiag = torch.diag_embed(inversevariance,offset=0) # 4*4 diagonal variance matrix
unitarytransform = np.zeros((normalisedvel.size(0), normalisedvel.size(1), normalisedvel.size(2),4,4), dtype = np.float32)
    # assumes the first time step has isotropic uncertainty (no directional uncertainty has been
    # introduced yet) and that Sigma at time t is aligned with v_(t-1), hence the l+1 offset below
for i in range(len(unitarytransform)):
for j in range(len(unitarytransform[i])):
unitarytransform[i][j][0][0][0] = 1
unitarytransform[i][j][0][1][0] = 0
unitarytransform[i][j][0][0][1] = 0
unitarytransform[i][j][0][1][1] = 1
unitarytransform[i][j][0][2][2] = 1
unitarytransform[i][j][0][3][2] = 0
unitarytransform[i][j][0][2][3] = 0
unitarytransform[i][j][0][3][3] = 1
# gets unitary transformation with offset of 1 in time domain as explained above.
for i in range(len(unitarytransform)):
for j in range(len(unitarytransform[i])):
for l in range(len(unitarytransform[i][j])-1):
unitarytransform[i][j][l+1][0][0] = normalisedvel.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l+1][1][0] = normalisedvel.detach().cpu().numpy()[i][j][l][1]
unitarytransform[i][j][l+1][0][1] = velperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l+1][1][1] = velperp.detach().cpu().numpy()[i][j][l][1]
unitarytransform[i][j][l+1][2][2] = normalisedaccel.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l+1][3][2] = normalisedaccel.detach().cpu().numpy()[i][j][l][1]
unitarytransform[i][j][l+1][2][3] = accelperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l+1][3][3] = accelperp.detach().cpu().numpy()[i][j][l][1]
# U
unitarytransform = torch.from_numpy(unitarytransform)
if preds.is_cuda:
unitarytransform = unitarytransform.cuda()
# U^-1
unitarytransforminverse = torch.inverse(unitarytransform)
# L= 1/2(ln(detSigma)+(x-mu)^TUSigma^(-1)U^(-1)(x-mu)) + const where const is unimportant
neg_log_p_1 = torch.matmul(unitarytransforminverse, differences)
neg_log_p_2 = torch.matmul(sigmadiag, neg_log_p_1)
neg_log_p_3 = torch.matmul(unitarytransform, neg_log_p_2)
neg_log_p_4 = torch.matmul(differencestranspose, neg_log_p_3).squeeze()
neg_log_p_4 = (neg_log_p_4 * 0.5)
loss_1 = neg_log_p_4
loss_2 = 0.0
if add_const:
const = (0.5 * torch.log(2*np.pi* determinant))
neg_log_p_4 = neg_log_p_4 + const
loss_2 += const
return neg_log_p_4.sum() / (target.size(0) * target.size(1)), loss_1.sum() / (target.size(0) * target.size(1)) , loss_2 / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
def nll_gaussian_var_multivariatesigma(preds, target, sigma, accel, add_const=True):
# returns the variance over the batch of the reconstruction loss
# get normalised vectors for acceleration and velocities v|| and a||
indices = torch.LongTensor([2, 3])
if preds.is_cuda:
indices = indices.cuda()
velocities = torch.index_select(preds, 3, indices)
velnorm = velocities.norm(p=2, dim=3, keepdim=True)
normalisedvel = velocities.div(velnorm.expand_as(velocities))
accelnorm = accel.norm(p=2, dim=3, keepdim=True)
normalisedaccel = accel.div(accelnorm.expand_as(accel))
# get perpendicular components to the accelerations and velocities accelperp, velperp
# note in 2D perpendicular vector is just rotation by pi/2 about origin (x,y) -> (-y,x)
rotationmatrix = np.zeros((velocities.size(0), velocities.size(1), velocities.size(2), 2, 2), dtype=np.float32)
for i in range(len(rotationmatrix)):
for j in range(len(rotationmatrix[i])):
for l in range(len(rotationmatrix[i][j])):
rotationmatrix[i][j][l][0][1] = np.float32(-1)
rotationmatrix[i][j][l][1][0] = np.float32(1)
rotationmatrix = torch.from_numpy(rotationmatrix)
if preds.is_cuda:
rotationmatrix = rotationmatrix.cuda()
velperp = torch.matmul(rotationmatrix, normalisedvel.unsqueeze(4))
velperp = velperp.squeeze()
accelperp = torch.matmul(rotationmatrix, normalisedaccel.unsqueeze(4))
accelperp = accelperp.squeeze()
# need Sigma=Sigma^2, Sigma^-1 and det(Sigma)
variance = sigma ** 2
determinant = torch.prod(variance, 3).unsqueeze(3)
inversevariance = variance ** -1
# in order for us to use simple methods need 1*4, 4*4, 4*4, 4*4, 4*1 tensors for each batch etc.
differences = preds - target
differencestranspose = differences.unsqueeze(3) # (x-mu)^T
differences = differences.unsqueeze(4) # (x-mu)
sigmadiag = torch.diag_embed(inversevariance, offset=0) # 4*4 diagonal variance matrix
unitarytransform = np.zeros((normalisedvel.size(0), normalisedvel.size(1), normalisedvel.size(2), 4, 4),
dtype=np.float32)
    # assumes the first time step has isotropic uncertainty (no directional uncertainty has been
    # introduced yet) and that Sigma at time t is aligned with v_(t-1), hence the l+1 offset below
for i in range(len(unitarytransform)):
for j in range(len(unitarytransform[i])):
unitarytransform[i][j][0][0][0] = 1
unitarytransform[i][j][0][1][0] = 0
unitarytransform[i][j][0][0][1] = 0
unitarytransform[i][j][0][1][1] = 1
unitarytransform[i][j][0][2][2] = 1
unitarytransform[i][j][0][3][2] = 0
unitarytransform[i][j][0][2][3] = 0
unitarytransform[i][j][0][3][3] = 1
# gets unitary transformation with offset of 1 in time domain as explained above.
for i in range(len(unitarytransform)):
for j in range(len(unitarytransform[i])):
for l in range(len(unitarytransform[i][j]) - 1):
unitarytransform[i][j][l + 1][0][0] = normalisedvel.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l + 1][1][0] = normalisedvel.detach().cpu().numpy()[i][j][l][1]
unitarytransform[i][j][l + 1][0][1] = velperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l + 1][1][1] = velperp.detach().cpu().numpy()[i][j][l][1]
unitarytransform[i][j][l + 1][2][2] = normalisedaccel.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l + 1][3][2] = normalisedaccel.detach().cpu().numpy()[i][j][l][1]
unitarytransform[i][j][l + 1][2][3] = accelperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform[i][j][l + 1][3][3] = accelperp.detach().cpu().numpy()[i][j][l][1]
# U
unitarytransform = torch.from_numpy(unitarytransform)
if preds.is_cuda:
unitarytransform = unitarytransform.cuda()
# U^-1
unitarytransforminverse = torch.inverse(unitarytransform)
# L= 1/2(ln(detSigma)+(x-mu)^TUSigma^(-1)U^(-1)(x-mu)) + const where const is unimportant
neg_log_p_1 = torch.matmul(unitarytransforminverse, differences)
neg_log_p_2 = torch.matmul(sigmadiag, neg_log_p_1)
neg_log_p_3 = torch.matmul(unitarytransform, neg_log_p_2)
neg_log_p_4 = torch.matmul(differencestranspose, neg_log_p_3).squeeze()
neg_log_p_4 = (neg_log_p_4 * 0.5)
loss_1 = neg_log_p_4
loss_2 = 0.0
if add_const:
const = (0.5 * torch.log(2 * np.pi * determinant)).sum().detach().cpu().numpy().item(0)
neg_log_p_4 += const
loss_2 += const
return (neg_log_p_4.sum(dim=1)/target.size(1)).var()
def nll_gaussian_multivariatesigma_efficient(preds, target, sigma, accel, add_const=True):
# get normalised vectors for acceleration and velocities v|| and a||
indices = torch.LongTensor([2,3])
if preds.is_cuda:
indices = indices.cuda()
velocities = torch.index_select(preds, 3, indices)
velnorm = velocities.norm(p=2, dim = 3, keepdim = True)
normalisedvel = velocities.div(velnorm.expand_as(velocities))
accelnorm = accel.norm(p=2, dim = 3, keepdim = True)
normalisedaccel = accel.div(accelnorm.expand_as(accel))
# get perpendicular components to the accelerations and velocities accelperp, velperp
# note in 2D perpendicular vector is just rotation by pi/2 about origin (x,y) -> (-y,x)
rotationmatrix = np.zeros((velocities.size(0), velocities.size(1), velocities.size(2),2,2), dtype=np.float32)
for i in range(len(rotationmatrix)):
for j in range(len(rotationmatrix[i])):
for l in range(len(rotationmatrix[i][j])):
rotationmatrix[i][j][l][0][1] = np.float32(-1)
rotationmatrix[i][j][l][1][0] = np.float32(1)
rotationmatrix = torch.from_numpy(rotationmatrix)
if preds.is_cuda:
rotationmatrix = rotationmatrix.cuda()
velperp = torch.matmul(rotationmatrix, normalisedvel.unsqueeze(4))
velperp = velperp.squeeze()
accelperp = torch.matmul(rotationmatrix, normalisedaccel.unsqueeze(4))
accelperp = accelperp.squeeze()
# need Sigma=Sigma^2, Sigma^-1 and det(Sigma)
variance = sigma ** 2
determinant = torch.prod(variance, 3).unsqueeze(3)
inversevariance = variance ** -1
# in order for us to use simple methods need 1*2, 2*2, 2*2, 2*2, 2*1 tensors for each batch etc.
differences = preds-target
indices_pos = torch.LongTensor([0,1])
indices_vel = torch.LongTensor([2,3])
if preds.is_cuda:
indices_pos, indices_vel = indices_pos.cuda(), indices_vel.cuda()
position_differences = torch.index_select(differences, 3, indices_pos)
velocity_differences = torch.index_select(differences, 3, indices_vel)
position_differencestranspose = position_differences.unsqueeze(3)
velocity_differencestranspose = velocity_differences.unsqueeze(3)# (x-mu)^T
position_differences = position_differences.unsqueeze(4)
velocity_differences = velocity_differences.unsqueeze(4)# (x-mu)
sigmadiag_position = torch.diag_embed(torch.index_select(inversevariance, 3, indices_pos), offset=0)
sigmadiag_velocity = torch.diag_embed(torch.index_select(inversevariance, 3, indices_vel), offset=0) # 2*2 diagonal variance matrix
unitarytransform_position = np.zeros((normalisedvel.size(0), normalisedvel.size(1), normalisedvel.size(2),2,2), dtype = np.float32)
unitarytransform_velocity = np.zeros((normalisedvel.size(0), normalisedvel.size(1), normalisedvel.size(2),2,2), dtype = np.float32)
    # assumes the first time step has isotropic uncertainty (no directional uncertainty has been
    # introduced yet) and that Sigma at time t is aligned with v_(t-1), hence the l+1 offset below
for i in range(len(unitarytransform_position)):
for j in range(len(unitarytransform_position[i])):
unitarytransform_position[i][j][0][0][0] = 1
unitarytransform_position[i][j][0][1][0] = 0
unitarytransform_position[i][j][0][0][1] = 0
unitarytransform_position[i][j][0][1][1] = 1
unitarytransform_velocity[i][j][0][0][0] = 1
unitarytransform_velocity[i][j][0][1][0] = 0
unitarytransform_velocity[i][j][0][0][1] = 0
unitarytransform_velocity[i][j][0][1][1] = 1
# gets unitary transformation with offset of 1 in time domain as explained above.
for i in range(len(unitarytransform_position)):
for j in range(len(unitarytransform_position[i])):
for l in range(len(unitarytransform_position[i][j])-1):
unitarytransform_position[i][j][l+1][0][0] = normalisedvel.detach().cpu().numpy()[i][j][l][0]
unitarytransform_position[i][j][l+1][1][0] = normalisedvel.detach().cpu().numpy()[i][j][l][1]
unitarytransform_position[i][j][l+1][0][1] = velperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform_position[i][j][l+1][1][1] = velperp.detach().cpu().numpy()[i][j][l][1]
unitarytransform_velocity[i][j][l+1][0][0] = normalisedaccel.detach().cpu().numpy()[i][j][l][0]
unitarytransform_velocity[i][j][l+1][1][0] = normalisedaccel.detach().cpu().numpy()[i][j][l][1]
unitarytransform_velocity[i][j][l+1][0][1] = accelperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform_velocity[i][j][l+1][1][1] = accelperp.detach().cpu().numpy()[i][j][l][1]
# U
unitarytransform_position = torch.from_numpy(unitarytransform_position)
unitarytransform_velocity = torch.from_numpy(unitarytransform_velocity)
if preds.is_cuda:
unitarytransform_position, unitarytransform_velocity = unitarytransform_position.cuda(), unitarytransform_velocity.cuda()
# U^-1
unitarytransforminverse_position = torch.inverse(unitarytransform_position)
unitarytransforminverse_velocity = torch.inverse(unitarytransform_velocity)
# L= 1/2(ln(detSigma)+(x-mu)^TUSigma^(-1)U^(-1)(x-mu)) + const where const is unimportant - sum of 2*2 matrix multiplications
neg_log_p_1 = torch.matmul(unitarytransforminverse_position, position_differences)
neg_log_p_2 = torch.matmul(sigmadiag_position, neg_log_p_1)
neg_log_p_3 = torch.matmul(unitarytransform_position, neg_log_p_2)
neg_log_p_4 = torch.matmul(position_differencestranspose, neg_log_p_3).squeeze()
neg_log_p_4 = (neg_log_p_4 * 0.5)
neg_log_v_1 = torch.matmul(unitarytransforminverse_velocity, velocity_differences)
neg_log_v_2 = torch.matmul(sigmadiag_velocity, neg_log_v_1)
neg_log_v_3 = torch.matmul(unitarytransform_velocity, neg_log_v_2)
neg_log_v_4 = torch.matmul(velocity_differencestranspose, neg_log_v_3).squeeze()
neg_log_v_4 = (neg_log_v_4 * 0.5)
loss_1 = neg_log_p_4 + neg_log_v_4
loss_2 = 0.0
if add_const:
const = (0.5 * torch.log(2*np.pi* determinant))
neg_log_p_4 = neg_log_p_4 + const
loss_2 += const
return (neg_log_p_4+neg_log_v_4).sum() / (target.size(0) * target.size(1)), loss_1.sum() / (target.size(0) * target.size(1)) , loss_2 / (target.size(0) * target.size(1)) # normalisation here is (batch * num atoms)
def nll_gaussian_var_multivariatesigma_efficient(preds, target, sigma, accel, add_const=True):
# returns the variance over the batch of the reconstruction loss
# get normalised vectors for acceleration and velocities v|| and a||
indices = torch.LongTensor([2, 3])
if preds.is_cuda:
indices = indices.cuda()
velocities = torch.index_select(preds, 3, indices)
velnorm = velocities.norm(p=2, dim=3, keepdim=True)
normalisedvel = velocities.div(velnorm.expand_as(velocities))
accelnorm = accel.norm(p=2, dim=3, keepdim=True)
normalisedaccel = accel.div(accelnorm.expand_as(accel))
# get perpendicular components to the accelerations and velocities accelperp, velperp
# note in 2D perpendicular vector is just rotation by pi/2 about origin (x,y) -> (-y,x)
rotationmatrix = np.zeros((velocities.size(0), velocities.size(1), velocities.size(2), 2, 2), dtype=np.float32)
for i in range(len(rotationmatrix)):
for j in range(len(rotationmatrix[i])):
for l in range(len(rotationmatrix[i][j])):
rotationmatrix[i][j][l][0][1] = np.float32(-1)
rotationmatrix[i][j][l][1][0] = np.float32(1)
rotationmatrix = torch.from_numpy(rotationmatrix)
if preds.is_cuda:
rotationmatrix = rotationmatrix.cuda()
velperp = torch.matmul(rotationmatrix, normalisedvel.unsqueeze(4))
velperp = velperp.squeeze()
accelperp = torch.matmul(rotationmatrix, normalisedaccel.unsqueeze(4))
accelperp = accelperp.squeeze()
# need Sigma=Sigma^2, Sigma^-1 and det(Sigma)
variance = sigma ** 2
determinant = torch.prod(variance, 3).unsqueeze(3)
inversevariance = variance ** -1
# in order for us to use simple methods need 1*2, 2*2, 2*2, 2*2, 2*1 tensors for each batch etc.
differences = preds - target
indices_pos = torch.LongTensor([0, 1])
indices_vel = torch.LongTensor([2, 3])
if preds.is_cuda:
indices_pos, indices_vel = indices_pos.cuda(), indices_vel.cuda()
position_differences = torch.index_select(differences, 3, indices_pos)
velocity_differences = torch.index_select(differences, 3, indices_vel)
position_differencestranspose = position_differences.unsqueeze(3)
velocity_differencestranspose = velocity_differences.unsqueeze(3) # (x-mu)^T
position_differences = position_differences.unsqueeze(4)
velocity_differences = velocity_differences.unsqueeze(4) # (x-mu)
sigmadiag_position = torch.diag_embed(torch.index_select(inversevariance, 3, indices_pos), offset=0)
sigmadiag_velocity = torch.diag_embed(torch.index_select(inversevariance, 3, indices_vel),
offset=0) # 2*2 diagonal variance matrix
unitarytransform_position = np.zeros((normalisedvel.size(0), normalisedvel.size(1), normalisedvel.size(2), 2, 2),
dtype=np.float32)
unitarytransform_velocity = np.zeros((normalisedvel.size(0), normalisedvel.size(1), normalisedvel.size(2), 2, 2),
dtype=np.float32)
    # assumes the first time step has isotropic uncertainty (no directional uncertainty has been
    # introduced yet) and that Sigma at time t is aligned with v_(t-1), hence the l+1 offset below
for i in range(len(unitarytransform_position)):
for j in range(len(unitarytransform_position[i])):
unitarytransform_position[i][j][0][0][0] = 1
unitarytransform_position[i][j][0][1][0] = 0
unitarytransform_position[i][j][0][0][1] = 0
unitarytransform_position[i][j][0][1][1] = 1
unitarytransform_velocity[i][j][0][0][0] = 1
unitarytransform_velocity[i][j][0][1][0] = 0
unitarytransform_velocity[i][j][0][0][1] = 0
unitarytransform_velocity[i][j][0][1][1] = 1
# gets unitary transformation with offset of 1 in time domain as explained above.
for i in range(len(unitarytransform_position)):
for j in range(len(unitarytransform_position[i])):
for l in range(len(unitarytransform_position[i][j]) - 1):
unitarytransform_position[i][j][l + 1][0][0] = normalisedvel.detach().cpu().numpy()[i][j][l][0]
unitarytransform_position[i][j][l + 1][1][0] = normalisedvel.detach().cpu().numpy()[i][j][l][1]
unitarytransform_position[i][j][l + 1][0][1] = velperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform_position[i][j][l + 1][1][1] = velperp.detach().cpu().numpy()[i][j][l][1]
unitarytransform_velocity[i][j][l + 1][0][0] = normalisedaccel.detach().cpu().numpy()[i][j][l][0]
unitarytransform_velocity[i][j][l + 1][1][0] = normalisedaccel.detach().cpu().numpy()[i][j][l][1]
unitarytransform_velocity[i][j][l + 1][0][1] = accelperp.detach().cpu().numpy()[i][j][l][0]
unitarytransform_velocity[i][j][l + 1][1][1] = accelperp.detach().cpu().numpy()[i][j][l][1]
# U
unitarytransform_position = torch.from_numpy(unitarytransform_position)
unitarytransform_velocity = torch.from_numpy(unitarytransform_velocity)
if preds.is_cuda:
unitarytransform_position, unitarytransform_velocity = unitarytransform_position.cuda(), unitarytransform_velocity.cuda()
# U^-1
unitarytransforminverse_position = torch.inverse(unitarytransform_position)
unitarytransforminverse_velocity = torch.inverse(unitarytransform_velocity)
# L= 1/2(ln(detSigma)+(x-mu)^TUSigma^(-1)U^(-1)(x-mu)) + const where const is unimportant - sum of 2*2 matrix multiplications
neg_log_p_1 = torch.matmul(unitarytransforminverse_position, position_differences)
neg_log_p_2 = torch.matmul(sigmadiag_position, neg_log_p_1)
neg_log_p_3 = torch.matmul(unitarytransform_position, neg_log_p_2)
neg_log_p_4 = torch.matmul(position_differencestranspose, neg_log_p_3).squeeze()
neg_log_p_4 = (neg_log_p_4 * 0.5)
neg_log_v_1 = torch.matmul(unitarytransforminverse_velocity, velocity_differences)
neg_log_v_2 = torch.matmul(sigmadiag_velocity, neg_log_v_1)
neg_log_v_3 = torch.matmul(unitarytransform_velocity, neg_log_v_2)
neg_log_v_4 = torch.matmul(velocity_differencestranspose, neg_log_v_3).squeeze()
neg_log_v_4 = (neg_log_v_4 * 0.5)
loss_1 = neg_log_p_4 + neg_log_v_4
loss_2 = 0.0
if add_const:
const = (0.5 * torch.log(2 * np.pi * determinant))
neg_log_p_4 = neg_log_p_4 + const
loss_2 += const
return ((neg_log_p_4+neg_log_v_4).sum(dim=1)/target.size(1)).var()
def true_flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
def KL_between_blocks(prob_list, num_atoms, eps=1e-16):
# Return a list of the mutual information between every block pair
KL_list = []
for i in range(len(prob_list)):
for j in range(len(prob_list)):
if i != j:
KL = prob_list[i] *( torch.log(prob_list[i] + eps) - torch.log(prob_list[j] + eps) )
KL_list.append( KL.sum() / (num_atoms * prob_list[i].size(0)) )
KL = prob_list[i] *( torch.log(prob_list[i] + eps) - torch.log( true_flip(prob_list[j],-1) + eps) )
KL_list.append( KL.sum() / (num_atoms * prob_list[i].size(0)) )
return KL_list
def decode_target( target, num_edge_types_list ):
target_list = []
base = np.prod(num_edge_types_list)
for i in range(len(num_edge_types_list)):
base /= num_edge_types_list[i]
target_list.append( target//base )
target = target % base
return target_list
def encode_target_list( target_list, edge_types_list ):
encoded_target = np.zeros( target_list[0].shape )
base = 1
for i in reversed(range(len(target_list))):
encoded_target += base*np.array(target_list[i])
base *= edge_types_list[i]
return encoded_target.astype('int')
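# Round-trip sketch (illustrative, with made-up factor sizes): encode_target_list and decode_target
# are mixed-radix inverses; with edge type counts [3, 2] the label pair (2, 1) encodes to
# 2*2 + 1 = 5 and decodes back to (2, 1).
def _example_target_encoding_roundtrip():
    target_list = [np.array([2, 0]), np.array([1, 1])]   # per-factor labels for two edges
    encoded = encode_target_list(target_list, [3, 2])    # -> array([5, 1])
    return decode_target(encoded, [3, 2])                # -> [array([2., 0.]), array([1., 1.])]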
def edge_accuracy_perm_NRI_batch(preds, target, num_edge_types_list):
# permutation edge accuracy calculator for the standard NRI model
# return the maximum accuracy of the batch over the permutations of the edge labels
# also returns a one-hot encoding of the number which represents this permutation
# also returns the accuracies for the individual factor graphs
_, preds = preds.max(-1) # returns index of max in each z_ij to reduce dim by 1
num_edge_types = np.prod(num_edge_types_list)
    preds = np.eye(num_edge_types)[np.array(preds.cpu())] # this is a nice way to turn integers into one-hot vectors
target = np.array(target.cpu())
perms = [p for p in permutations(range(num_edge_types))] # list of edge type permutations
# in the below, for each permutation of edge-types, permute preds, then take argmax to go from one-hot to integers
# then compare to target, compute accuracy
acc = np.array([np.mean(np.equal(target, np.argmax(preds[:,:,p], axis=-1),dtype=object)) for p in perms])
max_acc, idx = np.amax(acc), np.argmax(acc)
preds_deperm = np.argmax(preds[:,:,perms[idx]], axis=-1)
target_list = decode_target( target, num_edge_types_list )
preds_deperm_list = decode_target( preds_deperm, num_edge_types_list )
blocks_acc = [ np.mean(np.equal(target_list[i], preds_deperm_list[i], dtype=object),axis=-1)
for i in range(len(target_list)) ]
acc = np.mean(np.equal(target, preds_deperm ,dtype=object), axis=-1)
blocks_acc = np.swapaxes(np.array(blocks_acc),0,1)
idx_onehot = np.eye(len(perms))[np.array(idx)]
return acc, idx_onehot, blocks_acc
def edge_accuracy_perm_NRI(preds, targets, num_edge_types_list):
acc_batch, perm_code_onehot, acc_blocks_batch = edge_accuracy_perm_NRI_batch(preds, targets, num_edge_types_list)
acc = np.mean(acc_batch)
acc_var = np.var(acc_batch)
acc_blocks = np.mean(acc_blocks_batch, axis=0)
acc_var_blocks = np.var(acc_blocks_batch, axis=0)
return acc, perm_code_onehot, acc_blocks, acc_var, acc_var_blocks
def edge_accuracy_perm_fNRI_batch(preds_list, targets, num_edge_types_list):
# permutation edge accuracy calculator for the fNRI model
# return the maximum accuracy of the batch over the permutations of the edge labels
# also returns a one-hot encoding of the number which represents this permutation
# also returns the accuracies for the individual factor graphs
target_list = [ targets[:,i,:].cpu() for i in range(targets.shape[1])]
preds_list = [ pred.max(-1)[1].cpu() for pred in preds_list]
preds = encode_target_list(preds_list, num_edge_types_list)
target = encode_target_list(target_list, num_edge_types_list)
target_list = [ np.array(t.cpu()).astype('int') for t in target_list ]
num_edge_types = np.prod(num_edge_types_list)
    preds = np.eye(num_edge_types)[preds] # this is a nice way to turn integers into one-hot vectors
perms = [p for p in permutations(range(num_edge_types))] # list of edge type permutations
# in the below, for each permutation of edge-types, permute preds, then take argmax to go from one-hot to integers
# then compare to target to compute accuracy
acc = np.array([np.mean(np.equal(target, np.argmax(preds[:,:,p], axis=-1),dtype=object)) for p in perms])
    max_acc, idx = np.amax(acc), np.argmax(acc)
from __future__ import print_function
import math
import overlap
def CalcTriangleAng(pts, angleCache, pt1, pt2, pt3):
angId = (pt1, pt2, pt3)
if angId in angleCache:
return angleCache[angId]
#Angle is computed on pt3. pt1 and pt2 define the side opposite the angle
pt1v = pts[pt1]
pt2v = pts[pt2]
pt3v = pts[pt3]
v31 = (pt1v[0] - pt3v[0], pt1v[1] - pt3v[1])
v32 = (pt2v[0] - pt3v[0], pt2v[1] - pt3v[1])
mv31 = (v31[0]**2. + v31[1]**2.) ** 0.5
mv32 = (v32[0]**2. + v32[1]**2.) ** 0.5
if mv31 == 0. or mv32 == 0.:
raise RuntimeError("Angle not defined for zero area triangles")
v31n = [c / mv31 for c in v31]
v32n = [c / mv32 for c in v32]
crossProd = - v31n[0] * v32n[1] + v31n[1] * v32n[0]
dotProd = v31n[0] * v32n[0] + v31n[1] * v32n[1]
#Limit to valid range
if dotProd > 1.: dotProd = 1.
if dotProd < -1.: dotProd = -1.
#print(crossProd < 0., crossProd)
#print(math.asin(crossProd), math.acos(dotProd), cosAng)
if crossProd < 0.:
#Reflex angle detected
trigAng = 2. * math.pi - math.acos(dotProd)
else:
#Acute or obtuse angle
trigAng = math.acos(dotProd)
angleCache[angId] = trigAng
return trigAng
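# Minimal usage sketch (illustrative coordinates): for the right triangle below the interior angle
# at the origin (pt3) is pi/2; swapping pt1 and pt2 reverses the winding and the same corner is
# reported as the reflex angle 3*pi/2.
def _ExampleCalcTriangleAng():
    pts = [(0., 0.), (1., 0.), (0., 1.)]
    return CalcTriangleAng(pts, {}, 2, 1, 0)   # ~= math.pi / 2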
def MergeHoleIntoOuter(workingPoly, pts, outerInd, hole, holeInd):
#Outer polygon before cut
filterWorkingPoly = list(workingPoly[:outerInd+1])
filteredPts = pts[:]
#Reorder hole
reorderedHole = hole[holeInd:]
reorderedHole.extend(hole[:holeInd])
#Insert hole
holdStartInd = len(filteredPts)
filteredPts.extend(reorderedHole)
filterWorkingPoly.extend(range(holdStartInd, holdStartInd+len(reorderedHole)))
#Close hole
filterWorkingPoly.append(holdStartInd)
#Outer polygon after cut
filterWorkingPoly.extend(workingPoly[outerInd:])
return filterWorkingPoly, filteredPts
def PointVisibility(pts, poly, holeInd, holeNum, holes, getSingleResult = 0):
visiblePoints = []
#print("holeShape", holeShape)
ptCoord = holes[holeNum][holeInd]
#Order points by distance
ptsByDist = []
for ptIndex, ptNum in enumerate(poly):
dist = ((ptCoord[0] - pts[poly[ptIndex]][0])**2.+(ptCoord[1] - pts[poly[ptIndex]][1])**2.)**0.5
ptsByDist.append((dist, ptIndex))
ptsByDist.sort()
#Check each point
for dist, ptIndex in ptsByDist:
ptNum = poly[ptIndex]
#See if any line segments block
blocked = False
for edgeStart, edgeStartPt in enumerate(poly):
edgeEnd = (edgeStart + 1) % len(poly)
if edgeStart == ptIndex: continue
if edgeEnd == ptIndex: continue
ret = overlap.LineSegmentIntersection((ptCoord, pts[ptNum]), (pts[poly[edgeStart]], pts[poly[edgeEnd]]))
#print(ptIndex, edgeStart, edgeEnd, ret)
#print((ptCoord, pts[ptNum]), (pts[poly[edgeStart]], pts[poly[edgeEnd]]))
if ret is not False:
blocked=True
break
#Check if the hole self blocks
holeShape = holes[holeNum]
for holePtNum, holeChkCoord in enumerate(holeShape):
if blocked:
break
nextPtNum = (holePtNum + 1) % len(holeShape)
if holePtNum == holeInd: continue
if nextPtNum == holeInd: continue
ret = overlap.LineSegmentIntersection((ptCoord, pts[ptNum]), (holeShape[holePtNum], holeShape[nextPtNum]))
#print(ptIndex, holeInd, holePtNum, nextPtNum, ret)
if ret is not False:
#print((ptCoord, pts[ptNum]), (holeShape[holePtNum], holeShape[nextPtNum]))
blocked=True
#Check if it would be blocked by a future hole
for holeNumChk, holeShape in enumerate(holes):
if blocked:
break
if holeNumChk == holeNum: continue #Already done self collisions
for holePtNum, holeChkCoord in enumerate(holeShape):
if blocked:
break
nextPtNum = (holePtNum + 1) % len(holeShape)
if holePtNum == holeInd: continue
if nextPtNum == holeInd: continue
ret = overlap.LineSegmentIntersection((ptCoord, pts[ptNum]), (holeShape[holePtNum], holeShape[nextPtNum]))
#print(ptIndex, holeInd, holePtNum, nextPtNum, ret)
if ret is not False:
#print((ptCoord, pts[ptNum]), (holeShape[holePtNum], holeShape[nextPtNum]))
blocked=True
#print(ptNum, blocked)
if not blocked:
dist = ((ptCoord[0] - pts[poly[ptIndex]][0])**2.+(ptCoord[1] - pts[poly[ptIndex]][1])**2.)**0.5
visiblePoints.append((dist, ptIndex))
if getSingleResult and len(visiblePoints) > 0:
break
visiblePoints.sort()
return visiblePoints
def CheckNodeWindingDirection(pts, poly):
#http://stackoverflow.com/a/1165943
#Calculate area of polygon, sign indicates winding direction
total = 0.
for i, ptNum in enumerate(poly):
currentPt = pts[ptNum]
nextPt = pts[poly[(i+1)%len(poly)]]
a = (nextPt[0]-currentPt[0])*(nextPt[1]+currentPt[1])
total += a
return total * 0.5
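# Sign convention sketch (illustrative unit square): with this form of the shoelace formula a
# counter-clockwise polygon gives a negative value and a clockwise polygon a positive one, which
# is the convention MergeHolesIntoOuterPoly relies on below.
def _ExampleCheckWinding():
    square = [(0., 0.), (1., 0.), (1., 1.), (0., 1.)]     # counter-clockwise
    return CheckNodeWindingDirection(square, range(4))    # -> -1.0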
def MergeHolesIntoOuterPoly(poly, holes):
#Check outer polygon node order
if CheckNodeWindingDirection(poly, range(len(poly))) > 0.:
workingPoly = range(len(poly))[::-1]
pts = poly[:]
else:
workingPoly = range(len(poly))
pts = poly[:]
#Check holes node order
holes = holes[:]
for holeNum, hole in enumerate(holes):
if CheckNodeWindingDirection(hole, range(len(hole))) < 0.:
holes[holeNum] = hole[::-1]
else:
holes[holeNum] = hole[:]
for holeNum, hole in enumerate(holes):
#Find place to make cut
foundCut = None
for holdPtNum, holeCoord in enumerate(hole):
visible = PointVisibility(pts, workingPoly, holdPtNum, holeNum, holes, True)
#print("vis", holeCoord, visible)
if len(visible) > 0:
if foundCut is None:
foundCut = (visible[0][1], holdPtNum, visible[0][0])
elif visible[0][0] < foundCut[2]:
#Use nearer point
foundCut = (visible[0][1], holdPtNum, visible[0][0])
#print("better cut found", holeNum, holdPtNum, visible)
if foundCut is None:
raise RuntimeError("Failed to join hole to other polygon")
workingPoly, pts = MergeHoleIntoOuter(workingPoly, pts, foundCut[0], hole, foundCut[1])
#print("wp", workingPoly)
#print("pts", pts)
return workingPoly, pts
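# Usage sketch (illustrative geometry, not from the original code; requires the overlap module at
# call time): merge a square hole into a square outer boundary, returning a single polygon as an
# index list plus point list, with a cut edge joining the hole to the outer ring.
def _ExampleMergeHoles():
    outer = [(0., 0.), (4., 0.), (4., 4.), (0., 4.)]
    hole = [(1., 1.), (3., 1.), (3., 3.), (1., 3.)]
    return MergeHolesIntoOuterPoly(outer, [hole])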
def EarClippingNoHoles(workingPoly, pts, nodeOrder = 1, debug = 0):
if debug:
import matplotlib.pyplot as plt
import numpy as np
        ptsArr = np.array(pts)
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import math
import argparse
import itertools
import csv
from scipy.stats import linregress
from scipy.optimize import minimize
read_num_seq_lineage_global = None
read_num_min_seq_lineage_global = None
read_depth_seq_global = None
t_seq_global = None
kappa_global = None
x_mean_global = None
def fun_estimate_parameters(x, read_num_seq, t_seq, kappa=2.5, fitness_type='m'):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE LOG LIKELIHOOD VALUE OF EACH GENOTYPE GIVEN ITS
# FITNESS, THE ESTIMATED READ NUMBER PER GENOTYPE PER SEQUENCING TIME-POINT, AND THE ESTIMATED MEAN FITNESS PER
# SEQUENCING TIME-POINT
#
# INPUTS
# --x: fitness of each genotype, [x1, x2, ...]
# --read_num_seq: read number per genotype at each sequencing time-point
# --t_seq: sequenced time-points in number of generations, [0, t1, t2, ...]
# --kappa: a noise parameter that characterizes the total noise introduced by growth, cell transfer, DNA extraction,
# PCR, and sequencing (To measure kappa empirically, see the reference: [<NAME>, et al. Quantitative
# Evolutionary Dynamics Using High-resolution Lineage Tracking. Nature, 519: 181–186 (2015)].)
    #            (default: 2.5)
    # --fitness_type: type of fitness: Wrightian fitness (w), or Malthusian fitness (m) (default: m)
#
# OUTPUTS
# --estimate_parameters_output: log likelihood value of each genotype,
# estimated reads number per genotype per sequencing time-point,
# estimated mean fitness per sequencing time-point, [x_mean(t0),x_mean(t1),...]
# ------------------------------------------------------------------------------------------------------------------
read_num_seq = read_num_seq.astype(float)
read_num_seq[read_num_seq == 0] = 1e-1
read_depth_seq = np.sum(read_num_seq, axis=0)
lineages_num, seq_num = read_num_seq.shape
read_num_min_seq = np.zeros((lineages_num, seq_num))
read_num_min_seq[:, 0] = read_num_seq[:, 0]
for i in range(1, seq_num):
read_num_min_seq[:, i] = read_num_min_seq[:, i - 1] / 2 ** (t_seq[i] - t_seq[i - 1])
x[x <= -1] = -1 + 1e-7
x_mean = np.zeros(seq_num)
read_num_seq_est = np.zeros((lineages_num, seq_num))
read_num_seq_est[:, 0] = read_num_seq[:, 0]
likelihood_log_seq = np.zeros((lineages_num, seq_num))
if fitness_type == 'w':
for i in range(1, seq_num):
x_mean[i] = np.max(np.dot(x, read_num_seq[:, i]) / read_depth_seq[i], 0)
read_num_est_tempt = np.exp((t_seq[i] - t_seq[i - 1]) * (np.log(1 + x) + 1)
- (t_seq[i] - t_seq[i - 1]) / (x_mean[i] - x_mean[i - 1])
* ((x_mean[i] + 1) * np.log(x_mean[i] + 1)
- (x_mean[i - 1] + 1) * np.log(x_mean[i - 1] + 1)))
read_num_est_tempt = read_num_est_tempt * read_num_seq[:, i - 1] / read_depth_seq[i - 1] * read_depth_seq[i]
read_num_seq_est[:, i] = np.max([read_num_est_tempt, read_num_min_seq[:, i]], axis=0)
x_mean[i] = np.dot(x, read_num_seq_est[:, i]) / np.sum(read_num_seq_est[:, i])
elif fitness_type == 'm':
for i in range(1, seq_num):
x_mean[i] = np.max(np.dot(x, read_num_seq[:, i]) / read_depth_seq[i], 0)
read_num_est_tempt = np.exp((t_seq[i] - t_seq[i - 1]) * x
- (t_seq[i] - t_seq[i - 1]) * (x_mean[i] + x_mean[i - 1]) / 2)
read_num_est_tempt = read_num_est_tempt * read_num_seq[:, i - 1] / read_depth_seq[i - 1] * read_depth_seq[i]
read_num_seq_est[:, i] = np.max([read_num_est_tempt, read_num_min_seq[:, i]], axis=0)
x_mean[i] = np.dot(x, read_num_seq_est[:, i]) / np.sum(read_num_seq_est[:, i])
pos1_r, pos1_c = np.where(read_num_seq[:, :-1] >= 20)
likelihood_log_seq[pos1_r, pos1_c + 1] = (0.25 * np.log(read_num_seq_est[pos1_r, pos1_c + 1])
- 0.5 * np.log(4 * np.pi * kappa)
- 0.75 * np.log(read_num_seq_est[pos1_r, pos1_c + 1])
- (np.sqrt(read_num_seq[pos1_r, pos1_c + 1])
- np.sqrt(read_num_seq_est[pos1_r, pos1_c + 1])) ** 2 / kappa)
pos_r, pos_c = np.where(read_num_seq[:, :-1] < 20)
pos_p1 = np.where(read_num_seq[pos_r, pos_c + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq[pos_r, pos_c + 1] < 10)[0]
pos2_r = pos_r[pos_p1]
pos2_c = pos_c[pos_p1]
pos3_r = pos_r[pos_p2]
pos3_c = pos_c[pos_p2]
likelihood_log_seq[pos2_r, pos2_c + 1] = (np.multiply(read_num_seq[pos2_r, pos2_c + 1],
np.log(read_num_seq_est[pos2_r, pos2_c + 1]))
- read_num_seq_est[pos2_r, pos2_c + 1]
- np.multiply(read_num_seq[pos2_r, pos2_c + 1],
np.log(read_num_seq[pos2_r, pos2_c + 1]))
+ read_num_seq[pos2_r, pos2_c + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq[pos2_r, pos2_c + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq[pos3_r, pos3_c + 1].astype(int)]
likelihood_log_seq[pos3_r, pos3_c + 1] = (np.multiply(read_num_seq[pos3_r, pos3_c + 1],
np.log(read_num_seq_est[pos3_r, pos3_c + 1]))
- read_num_seq_est[pos3_r, pos3_c + 1]
- np.log(factorial_tempt))
likelihood_log = np.sum(likelihood_log_seq, axis=1)
estimate_parameters_output = {'Likelihood_Log': likelihood_log,
'Estimated_Read_Number': read_num_seq_est,
'Estimated_Mean_Fitness': x_mean}
return estimate_parameters_output
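# A minimal sketch (toy numbers, not real data) of the expected input layout:
# read_num_seq has one row per genotype and one column per sequenced time-point,
# and t_seq lists those time-points in generations.
if __name__ == "__main__":
    read_num_seq_example = np.array([[120., 95., 60.],
                                     [80., 105., 140.]])  # 2 genotypes x 3 time-points
    t_seq_example = np.array([0., 3., 6.])
    print(read_num_seq_example.shape, t_seq_example)
    # out = fun_estimate_parameters(np.array([0.0, 0.05]), read_num_seq_example, t_seq_example)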
def fun_likelihood_lineage_w(x):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE SUM OF THE NEGATIVE LOG LIKELIHOOD VALUE OF ALL
# GENOTYPES GIVEN THE WRIGHTIAN FITNESS OF EACH GENOTYPE
#
# INPUTS
# --x: fitness of a genotype
#
# OUTPUTS
# --likelihood_log_lineage: the negative log likelihood value of the genotype
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
if x <= -1:
x = -1 + 1e-7
seq_num = read_num_seq_lineage_global.shape[0]
read_num_seq_lineage_est = np.zeros(seq_num)
read_num_seq_lineage_est[0] = read_num_seq_lineage_global[0]
likelihood_log_seq_lineage = np.zeros(seq_num)
for i in range(1, seq_num):
read_num_lineage_est_tempt = np.exp((t_seq_global[i] - t_seq_global[i - 1]) * (np.log(1 + x) + 1)
- (t_seq_global[i] - t_seq_global[i - 1]) / (
x_mean_global[i] - x_mean_global[i - 1])
* ((x_mean_global[i] + 1) * np.log(x_mean_global[i] + 1)
- (x_mean_global[i - 1] + 1) * np.log(x_mean_global[i - 1] + 1)))
read_num_lineage_est_tempt = (read_num_lineage_est_tempt * read_num_seq_lineage_global[i - 1]
/ read_depth_seq_global[i - 1] * read_depth_seq_global[i])
read_num_seq_lineage_est[i] = np.max([read_num_lineage_est_tempt.item(), read_num_min_seq_lineage_global[i]])
pos1 = np.where(read_num_seq_lineage_global[:-1] >= 20)[0]
likelihood_log_seq_lineage[pos1 + 1] = (0.25 * np.log(read_num_seq_lineage_est[pos1 + 1])
- 0.5 * np.log(4 * np.pi * kappa_global)
- 0.75 * np.log(read_num_seq_lineage_est[pos1 + 1])
- (np.sqrt(read_num_seq_lineage_global[pos1 + 1])
- np.sqrt(read_num_seq_lineage_est[pos1 + 1])) ** 2 / kappa_global)
pos = np.where(read_num_seq_lineage_global[:-1] < 20)[0]
pos_p1 = np.where(read_num_seq_lineage_global[pos + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq_lineage_global[pos + 1] < 10)[0]
pos2 = pos[pos_p1]
pos3 = pos[pos_p2]
likelihood_log_seq_lineage[pos2 + 1] = (np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_est[pos2 + 1]))
- read_num_seq_lineage_est[pos2 + 1]
- np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_global[pos2 + 1]))
+ read_num_seq_lineage_global[pos2 + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq_lineage_global[pos2 + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq_lineage_global[pos3 + 1].astype(int)]
likelihood_log_seq_lineage[pos3 + 1] = (np.multiply(read_num_seq_lineage_global[pos3 + 1],
np.log(read_num_seq_lineage_est[pos3 + 1]))
- read_num_seq_lineage_est[pos3 + 1]
- np.log(factorial_tempt))
likelihood_log_lineage = np.sum(likelihood_log_seq_lineage)
return -likelihood_log_lineage
def fun_likelihood_lineage_m(x):
# ------------------------------------------------------------------------------------------------------------------
# A SUB-FUNCTION CALLED BY MAIN FUNCTION main() TO CALCULATE THE SUM OF THE NEGATIVE LOG LIKELIHOOD VALUE OF ALL
# GENOTYPES GIVEN THE MALTHUSIAN FITNESS OF EACH GENOTYPE
#
# INPUTS
# --x: fitness of a genotype
#
# OUTPUTS
# --likelihood_log_lineage: the negative log likelihood value of the genotype
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
if x <= -1:
x = -1 + 1e-7
seq_num = read_num_seq_lineage_global.shape[0]
read_num_seq_lineage_est = np.zeros(seq_num)
read_num_seq_lineage_est[0] = read_num_seq_lineage_global[0]
likelihood_log_seq_lineage = np.zeros(seq_num)
for i in range(1, seq_num):
read_num_lineage_est_tempt = np.exp((t_seq_global[i] - t_seq_global[i - 1]) * x
- (t_seq_global[i] - t_seq_global[i - 1]) *
(x_mean_global[i] + x_mean_global[i - 1]) / 2)
read_num_lineage_est_tempt = (read_num_lineage_est_tempt * read_num_seq_lineage_global[i - 1]
/ read_depth_seq_global[i - 1] * read_depth_seq_global[i])
read_num_seq_lineage_est[i] = np.max([read_num_lineage_est_tempt.item(), read_num_min_seq_lineage_global[i]])
pos1 = np.where(read_num_seq_lineage_global[:-1] >= 20)[0]
likelihood_log_seq_lineage[pos1 + 1] = (0.25 * np.log(read_num_seq_lineage_est[pos1 + 1])
- 0.5 * np.log(4 * np.pi * kappa_global)
- 0.75 * np.log(read_num_seq_lineage_est[pos1 + 1])
- (np.sqrt(read_num_seq_lineage_global[pos1 + 1])
- np.sqrt(read_num_seq_lineage_est[pos1 + 1])) ** 2 / kappa_global)
pos = np.where(read_num_seq_lineage_global[:-1] < 20)[0]
pos_p1 = np.where(read_num_seq_lineage_global[pos + 1] >= 10)[0]
pos_p2 = np.where(read_num_seq_lineage_global[pos + 1] < 10)[0]
pos2 = pos[pos_p1]
pos3 = pos[pos_p2]
likelihood_log_seq_lineage[pos2 + 1] = (np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_est[pos2 + 1]))
- read_num_seq_lineage_est[pos2 + 1]
- np.multiply(read_num_seq_lineage_global[pos2 + 1],
np.log(read_num_seq_lineage_global[pos2 + 1]))
+ read_num_seq_lineage_global[pos2 + 1]
- 0.5 * np.log(2 * np.pi * read_num_seq_lineage_global[pos2 + 1]))
factorial_tempt = [float(math.factorial(i)) for i in read_num_seq_lineage_global[pos3 + 1].astype(int)]
likelihood_log_seq_lineage[pos3 + 1] = (np.multiply(read_num_seq_lineage_global[pos3 + 1],
np.log(read_num_seq_lineage_est[pos3 + 1]))
- read_num_seq_lineage_est[pos3 + 1]
- np.log(factorial_tempt))
likelihood_log_lineage = np.sum(likelihood_log_seq_lineage)
return -likelihood_log_lineage
def main():
# ------------------------------------------------------------------------------------------------------------------
# ESTIMATE FITNESS OF EACH GENOTYPE IN A COMPETITIVE POOLED GROWTH EXPERIMENT
#
# OPTIONS
# --input: a .csv file, with each column being the read number per genotype at each sequenced time-point
# --t_seq: sequenced time-points in number of generations (format: 0 t1 t2 ...)
# --max_iter_num: maximum number of iterations in the optimization (Small numbers can reduce running time
# and decrease accuracy.) (default: 10)
# --kappa: a noise parameter that characterizes the total noise introduced by growth, cell transfer,
# DNA extraction, PCR, and sequencing (To measure kappa empirically, see the reference:
# [<NAME>, et al. Quantitative Evolutionary Dynamics Using High-resolution Lineage Tracking.
# Nature, 519: 181–186 (2015)].) (default: 2.5)
# --regression_num: number of points used in the initial linear-regression-based fitness estimate (default: 2)
    # --fitness_type: type of fitness: Wrightian fitness (w), or Malthusian fitness (m) (default: m)
# --output_filename: prefix of output .csv files (default: output)
#
# OUTPUTS
# output_filename_FitSeq_Result.csv: 1st column: estimated fitness of each genotype, [x1, x2, ...],
# 2nd column: log likelihood value of each genotype, [f1, f2, ...],
# 3rd column: estimated mean fitness per sequenced time-point
# [x_mean(0), x_mean(t1), ...],
    #                                     4th column+: estimated read number per genotype per sequencing time-point,
# with each time-point being a column
# ------------------------------------------------------------------------------------------------------------------
global read_num_seq_lineage_global
global read_num_min_seq_lineage_global
global read_depth_seq_global
global t_seq_global
global kappa_global
global x_mean_global
parser = argparse.ArgumentParser(description='Estimate fitness of each genotype in a competitive pooled growth '
'experiment', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', type=str, help='a .csv file: with each column being the read number per '
'genotype at each sequenced time-point')
parser.add_argument('-t', '--t_seq', nargs='*', type=float, help='sequenced time-points in number of generations')
parser.add_argument('-m', '--max_iter_num', type=int, default=10,
help='maximum number of iterations in the optimization')
parser.add_argument('-k', '--kappa', type=float, default=2.5,
help='a noise parameter that characterizes the total noise introduced by growth, '
'cell transfer, DNA extraction, PCR, and sequencing (To measure kappa empirically, '
'see the reference: [<NAME>, et al. Quantitative Evolutionary Dynamics Using '
'High-resolution Lineage Tracking. Nature, 519: 181–186 (2015)].)')
parser.add_argument('-g', '--regression_num', type=int, default=2,
help='number of points used in the initial linear-regression-based fitness estimate')
parser.add_argument('-f', '--fitness_type', type=str, default='m',
help='type of fitness: Wrightian fitness (w), or Malthusian fitness (m)')
parser.add_argument('-o', '--output_filename', type=str, default='output', help='prefix of output .csv files')
args = parser.parse_args()
read_num_seq = np.array(pd.read_csv(args.input, header=None), dtype=float)
t_seq = np.array(args.t_seq, dtype=float)
max_iter_num = args.max_iter_num
kappa = args.kappa
regression_num = args.regression_num
fitness_type = args.fitness_type
output_filename = args.output_filename
for i in range(regression_num):
pos_zero = np.where(read_num_seq[:, i] < 1)
read_num_seq[pos_zero, i] = 1
# pos_zero = np.where(read_num_seq[:, 0] < 1)
# read_num_seq[pos_zero, 0] = 1
read_num_seq[read_num_seq == 0] = 1e-1
    read_depth_seq = np.sum(read_num_seq, axis=0)
from functools import cached_property
from typing import Callable, TypeVar, Tuple
import numpy as np
from .GeoObject import GeoObject
from .Point import Point
from ..data.Function import Function
from ..common.logger import logger
from spdm.geometry.GeoObject import GeoObject, _TCoord
class Curve(GeoObject):
# @staticmethod
# def __new__(cls, *args, type=None, **kwargs):
# if len(args) == 0:
# raise RuntimeError(f"Illegal input! {len(args)}")
# shape = [(len(a) if isinstance(a, np.ndarray) else 1) for a in args]
# if all([s == 1 for s in shape]):
# return object.__new__(Point)
# elif cls is not Curve:
# return object.__new__(cls)
# else:
# # FIXME: find module
# return object.__new__(Curve)
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def points(self, *args, **kwargs):
return super().points(*args, **kwargs)
@cached_property
def dl(self) -> np.ndarray:
x, y = np.moveaxis(self.points(), -1, 0)
a, b = self.derivative()
# a = a[:-1]
# b = b[:-1]
dx = x[1:]-x[:-1]
dy = y[1:]-y[:-1]
m1 = (-a[:-1]*dy+b[:-1]*dx)/(a[:-1]*dx+b[:-1]*dy)
# a = np.roll(a, 1, axis=0)
# b = np.roll(b, 1, axis=0)
m2 = (-a[1:]*dy+b[1:]*dx)/(a[1:]*dx+b[1:]*dy)
return np.sqrt(dx**2+dy**2)*(1 + (2.0*m1**2+2.0*m2**2-m1*m2)/30)
@cached_property
def length(self):
return np.sum(self.dl)
def integral(self, func: Callable[[_TCoord, _TCoord], _TCoord]) -> float:
x, y = self.xyz
val = func(x, y)
# c_pts = self.points((self._mesh[0][1:] + self._mesh[0][:-1])*0.5)
return np.sum(0.5*(val[:-1]+val[1:]) * self.dl)
# def average(self, func: Callable[[_TCoord, _TCoord], _TCoord]) -> float:
# return self.integral(func)/self.length
def encloses_point(self, *x: float, **kwargs) -> bool:
return super().enclosed(**x, **kwargs)
def trim(self):
return NotImplemented
def remesh(self, mesh_type=None):
return NotImplemented
class Line(Curve):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, is_closed=False, **kwargs)
def intersect2d(a0: Point, a1: Point, b0: Point, b1: Point) -> Tuple[float, float]:
da = a1-a0
db = b1-b0
dp = a0-b0
dap = [-da[1], da[0]]
dbp = [-db[1], db[0]]
    return np.dot(dbp, dp) / np.dot(dbp, da)
import csv
import json
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from mlxtend.plotting import plot_decision_regions
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import MinMaxScaler, StandardScaler, PolynomialFeatures
# TASK 1: Use the battery-charging problem.
# Display the data statistics - do they explain the data and help pick the right model?
# Which model did you use in the previous classes?
# Train models based on linear regression, non-linear regression and a decision tree, and compare the results.
# Use a more complex estimator such as a random forest.
def zad1():
data = []
with open('./_data/trainingdata.txt', 'r') as csv_f:
csv_reader = csv.reader(csv_f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
for row in csv_reader:
data.append(row)
data = np.array(data)
x = data[:, 0].reshape(-1, 1)
y = data[:, 1].reshape(-1, 1)
reg = LinearRegression()
reg.fit(x, y)
pred = reg.predict(x)
model = Pipeline([('poly', PolynomialFeatures(degree=10)),
('linear', LinearRegression(fit_intercept=False))])
model.fit(x, y)
poly_pred = model.predict(x)
plt.plot(x, y, 'ro')
plt.plot(x, pred, 'bo')
plt.plot(x, poly_pred, 'go')
plt.show()
X, y = datasets.load_iris(return_X_y=True, as_frame=True)
# TASK 2
# Load the dataset and display information about it. Load it as a pandas DataFrame
# (argument as_frame=True) and use the describe method.
# Can we obtain any valuable information?
def zad2():
print(f'Describe: \n {X.describe()}')
print(f'Head: \n {X.head()}')
# TASKS 3 and 4: Split the loaded data into a training and a test set with an 80%/20% ratio.
# Check the percentage distribution of the individual classes in the training and test sets.
# Ideally the class distributions of the samples in these sets should be identical;
# modify the previous code so that the data satisfy this condition after the split.
def zad3_4():
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
X_train = X_train[['sepal length (cm)', 'sepal width (cm)']]
X_test = X_test[['sepal length (cm)', 'sepal width (cm)']]
print(y_train.value_counts() / len(y_train) * 100)
# TASK 5: Use the presented functionality to normalize the data.
# Generate a plot to check what the data values look like after rescaling
# (limit yourself to two features of the Iris dataset).
# What ranges do you think the individual features should have?
# What are they in reality (do they match your expectations)?
# Are the individual features on the same scale?
def zad5_6():
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
X_train = X_train[['sepal length (cm)', 'sepal width (cm)']]
X_test = X_test[['sepal length (cm)', 'sepal width (cm)']]
    # We visualize only the first two features so that they can easily be shown in 2D.
plt.scatter(np.array(X)[:, 0], np.array(X)[:, 1])
plt.axvline(x=0)
plt.axhline(y=0)
plt.title('Iris sepal features')
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.show()
skaler = MinMaxScaler()
# skaler = StandardScaler()
skaler.fit(X_train)
X_train = skaler.transform(X_train)
plt.scatter(np.array(X_train)[:, 0],
np.array(X_train)[:, 1])
plt.axvline(x=0)
plt.axhline(y=0)
plt.show()
# TASK 7: Use a pipeline to apply the implemented normalization.
# TASK 8: Train a classifier for the Iris dataset.
# TASK 9: Visualize the decision space.
def zad789():
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
X_train = X_train[['sepal length (cm)', 'sepal width (cm)']]
X_test = X_test[['sepal length (cm)', 'sepal width (cm)']]
clf = Pipeline([
('skaler', MinMaxScaler()),
('svc', SVC())
])
clf.fit(X_train, y_train)
src = clf.score(X_test, y_test)
print(f'score : {src}')
plot_decision_regions(np.array(X_test), np.array(y_test), clf=clf, legend=1)
print(clf.predict(X_test)[:5])
plt.scatter(np.array(X_train)[:, 0], np.array(X_train)[:, 1])
plt.axvline(x=0)
plt.axhline(y=0)
plt.title('Iris sepal features')
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.show()
# TASK 10: Write code that trains the classifiers
# (LogisticRegression, SVC, DecisionTreeClassifier, RandomForestClassifier),
# runs prediction and checks accuracy on the test set.
# Store the results of the individual algorithms in a dictionary.
def zad10_11():
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
X_train = X_train[['sepal length (cm)', 'sepal width (cm)']]
X_test = X_test[['sepal length (cm)', 'sepal width (cm)']]
klasyfikatory = ["LogisticRegression", "SVC", "DecisionTreeClassifier", "RandomForestClassifier"]
wyniki = dict()
for classifier in klasyfikatory:
clf = Pipeline([
('skaler', MinMaxScaler()),
(classifier, globals()[classifier]())
])
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
print(globals()["SVC"])
a = globals()["SVC"]()
a.fit(X_train, y_train)
print(a.score(X_test, y_test))
    # loop over the classifiers and check which one works best for the given problem
for classifier in klasyfikatory:
a = globals()[classifier]()
a.fit(X_train, y_train)
print(f'Wynik dla {classifier} = {a.score(X_test, y_test)}')
wyniki[classifier] = a.score(X_test, y_test)
plot_decision_regions(np.array(X_test), np.array(y_test), clf=clf, legend=1)
    plt.scatter(np.array(X_train)[:, 0], np.array(X_train)[:, 1])
import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
import torch.nn.functional as F
import cv2
from scipy import spatial
import struct
import imghdr
import cython
from scipy.special import softmax
#TensorRT stuff
from numpy import array
import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
from numba import jit
from numba import vectorize, float64
import numba as nb
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def get_all_files(directory):
files = []
for f in os.listdir(directory):
if os.path.isfile(os.path.join(directory, f)):
files.append(os.path.join(directory, f))
else:
files.extend(get_all_files(os.path.join(directory, f)))
return files
def calcAngularDistance(gt_rot, pr_rot):
rotDiff = np.dot(gt_rot, np.transpose(pr_rot))
trace = np.trace(rotDiff)
return np.rad2deg(np.arccos((trace-1.0)/2.0))
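# Quick sanity check (assumed rotations): the angular distance between a rotation
# and itself is 0 degrees; between identity and a 90-degree rotation about z it is 90.
if __name__ == "__main__":
    _Rz90 = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
    print(calcAngularDistance(np.eye(3), np.eye(3)))  # ~0.0
    print(calcAngularDistance(np.eye(3), _Rz90))      # ~90.0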
def get_camera_intrinsic():
K = np.zeros((3, 3), dtype='float64')
# life came
# K[0, 0], K[0, 2] = 1.13908155e+03, 6.57642892e+02
# K[1, 1], K[1, 2] = 1.13705701e+03, 3.28071843e+02
# K[2, 2] = 1.
# Logitech C920
K[0, 0], K[0, 2] = 935.67, 624.06
K[1, 1], K[1, 2] = 934.86, 354.35
K[2, 2] = 1.
return K
def get_camera_distortion_mat():
dist = [[-0.00580032, -0.17520014, 0.00051201, 0.00432754, 0.24850474]]
return np.array(dist)
def compute_projection(points_3D, transformation, internal_calibration):
projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')
camera_projection = (internal_calibration.dot(transformation)).dot(points_3D)
projections_2d[0, :] = camera_projection[0, :]/camera_projection[2, :]
projections_2d[1, :] = camera_projection[1, :]/camera_projection[2, :]
return projections_2d
def compute_transformation(points_3D, transformation):
return transformation.dot(points_3D)
def calc_pts_diameter(pts):
diameter = -1
for pt_id in range(pts.shape[0]):
pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1])
pts_diff = pt_dup - pts[pt_id:, :]
max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max())
if max_dist > diameter:
diameter = max_dist
return diameter
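# Minimal example (assumed points): the diameter of the unit square's corner set
# is its diagonal length, sqrt(2).
if __name__ == "__main__":
    _pts = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    print(calc_pts_diameter(_pts))  # ~1.4142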
def adi(pts_est, pts_gt):
nn_index = spatial.cKDTree(pts_est)
nn_dists, _ = nn_index.query(pts_gt, k=1)
e = nn_dists.mean()
return e
def get_3D_corners(vertices):
min_x = np.min(vertices[0,:])
max_x = np.max(vertices[0,:])
min_y = np.min(vertices[1,:])
max_y = np.max(vertices[1,:])
min_z = np.min(vertices[2,:])
max_z = np.max(vertices[2,:])
# use stub since we know the cargo ball's bounding box
#min_x = -0.33/2
#max_x = 0.33/2
#min_y = -0.33/2
#max_y = 0.33/2
#min_z = -0.33/2
#max_z = 0.33/2
corners = np.array([[min_x, min_y, min_z],
[min_x, min_y, max_z],
[min_x, max_y, min_z],
[min_x, max_y, max_z],
[max_x, min_y, min_z],
[max_x, min_y, max_z],
[max_x, max_y, min_z],
[max_x, max_y, max_z]])
corners = np.concatenate((np.transpose(corners), np.ones((1,8)) ), axis=0)
return corners
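# Minimal example (assumed vertices): given a 3 x N array of model vertices, the
# result holds the 8 axis-aligned bounding-box corners in homogeneous coordinates.
if __name__ == "__main__":
    _vertices = np.array([[0., 1.], [0., 2.], [0., 3.]])  # rows are x, y, z
    print(get_3D_corners(_vertices).shape)  # (4, 8)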
def pnp(points_3D, points_2D, cameraMatrix):
try:
distCoeffs = pnp.distCoeffs
except:
distCoeffs = np.zeros((8, 1), dtype='float32')
    assert points_3D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'
_, rvecs, tvecs = cv2.solvePnP(points_3D,
# points_2D,
np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),
cameraMatrix,
distCoeffs)
# , None, None, False, cv2.SOLVEPNP_UPNP)
# R_exp, t, _ = cv2.solvePnPRansac(points_3D,
# points_2D,
# cameraMatrix,
# distCoeffs,
# reprojectionError=12.0)
#
R, _ = cv2.Rodrigues(rvecs)
# Rt = np.c_[R, t]
return rvecs, R, tvecs
def get_2d_bb(box, size):
x = box[0]
y = box[1]
min_x = np.min(np.reshape(box, [9,2])[:,0])
max_x = np.max(np.reshape(box, [9,2])[:,0])
min_y = np.min(np.reshape(box, [9,2])[:,1])
max_y = np.max(np.reshape(box, [9,2])[:,1])
w = max_x - min_x
h = max_y - min_y
new_box = [x*size, y*size, w*size, h*size]
return new_box
def compute_2d_bb(pts):
min_x = np.min(pts[0,:])
max_x = np.max(pts[0,:])
min_y = np.min(pts[1,:])
max_y = np.max(pts[1,:])
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx, cy, w, h]
return new_box
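# Minimal example (assumed projections): pts is a 2 x N array of x/y coordinates;
# the result is the enclosing box as [cx, cy, w, h].
if __name__ == "__main__":
    _pts = np.array([[0.2, 0.6, 0.4], [0.1, 0.5, 0.3]])
    print(compute_2d_bb(_pts))  # approximately [0.4, 0.3, 0.4, 0.4]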
def compute_2d_bb_from_orig_pix(pts, size):
min_x = np.min(pts[0,:]) / 1280.0
max_x = np.max(pts[0,:]) / 1280.0
min_y = np.min(pts[1,:]) / 720.0
max_y = np.max(pts[1,:]) / 720.0
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx*size, cy*size, w*size, h*size]
return new_box
def bbox_iou(box1, box2, x1y1x2y2=False):
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
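# Minimal example (assumed boxes in [cx, cy, w, h] format): identical boxes give an
# IoU of 1.0, and a box shifted by half its width gives an IoU of 1/3.
if __name__ == "__main__":
    print(bbox_iou([0.5, 0.5, 1.0, 1.0], [0.5, 0.5, 1.0, 1.0]))  # 1.0
    print(bbox_iou([0.5, 0.5, 1.0, 1.0], [1.0, 0.5, 1.0, 1.0]))  # ~0.333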
def bbox_iou_cube(box1, box2, x1y1x2y2=True):
if x1y1x2y2:
# point 1, 3, 5, 7 are points that form the front face of the cube
# point 3 and 5 are the upper left and lower right points of the rectangle, to be used for nms area overlap calculation
# nms algorithm x1 is point 3's X coordinate which has index 6 in the "boxes" array of length 21
# nms algorithm y1 is point 3's Y coordinate which has index 7 in the "boxes" array of length 21
# nms algorithm x2 is point 5's X coordinate which has index 10 in the "boxes" array of length 21
# nms algorithm y2 is point 5's y coordinate which has index 11 in the "boxes" array of length 21
        # With the above choice, we pick index 6, 7, 10 and 11 from the "boxes" array of length 21, for nms
mx = min(box1[6], box2[6])
Mx = max(box1[10], box2[10])
my = min(box1[7], box2[7])
My = max(box1[11], box2[11])
w1 = box1[10] - box1[6]
h1 = box1[11] - box1[7]
w2 = box2[10] - box2[6]
h2 = box2[11] - box2[7]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
def convert_bbox_format_for_sorting(bboxes):
all_boxes = []
for i in range(len(bboxes)):
w = 1280
h = 720
x1 = bboxes[i][6]*w
y1 = bboxes[i][7]*h
x2 = bboxes[i][10]*w
y2 = bboxes[i][11]*h
        confidence = bboxes[i][18]
class_label = bboxes[i][20]
all_boxes.append([x1, y1, x2, y2, confidence, confidence, class_label])
return all_boxes
def corner_confidences(gt_corners, pr_corners, th=30, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a torch.FloatTensor of shape (nA,) with 8 confidence values
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 8, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 8)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 8
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 8)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 8
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence(gt_corners, pr_corners, th=30, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a list of shape (8,) with 8 confidence values
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(8, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(8, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
def corner_confidences9(gt_corners, pr_corners, th=80, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a torch.FloatTensor of shape (nA,) with 9 confidence values
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 9, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 9)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 9
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 9)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 9
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence9(gt_corners, pr_corners, th=80, sharpness=2, im_width=1280, im_height=720):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (18,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (18,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
return : a list of shape (9,) with 9 confidence values
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(9, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(9, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
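# Minimal example (assumed corner values): identical ground-truth and predicted
# corner projections give a confidence close to 1.0.
if __name__ == "__main__":
    _gt = [0.5] * 18
    print(corner_confidence9(_gt, torch.FloatTensor(_gt)))  # ~1.0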
@vectorize([float64(float64)])
def sigmoid(x):
return 1.0/(math.exp(-x)+1.)
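# Minimal example: the numba-vectorized sigmoid acts elementwise on float64 arrays.
if __name__ == "__main__":
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~[0.2689, 0.5, 0.7311]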
def softmax_torch(x):
x = torch.exp(x - torch.max(x))
x = x/x.sum()
return x
def nms(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
# print("unsorted")
# print_class_and_conf(boxes)
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
# print("sorted")
# print_class_and_conf(out_boxes)
return out_boxes
def nms_v2(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
print("unsorted")
print_class_and_conf(boxes)
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
print("sorted")
print_class_and_conf(out_boxes)
return out_boxes
def print_class_and_conf(boxes):
for box in boxes:
print('class ', int(box[20]), 'conf ', '{:0.3f}'.format(float(box[18])))
def nms_multi(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][0][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[0][4] > 0:
out_boxes.append(box_i[0])
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[0][4] = 0
return out_boxes
def nms_multi_v2(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
# index 18 is the det_conf i.e. confidence of the detected object
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][18]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[18] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou_cube(box_i, box_j, x1y1x2y2=True) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[18] = 0
return out_boxes
# import the necessary packages
import numpy as np
# Malisiewicz et al.
def non_max_suppression_fast(boxes, overlapThresh):
boxes = np.asarray(boxes)
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
# if boxes.dtype.kind == "i":
# boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
# x1 = boxes[:,0]
# y1 = boxes[:,1]
# x2 = boxes[:,2]
# y2 = boxes[:,3]
# grab the front faces of the cube as bounding boxes
# point 1, 3, 5, 7 are points that form the front face of the cube
# point 3 and 5 are the upper left and lower right points of the rectangle, to be used for nms area overlap calculation
# nms algorithm x1 is point 3's X coordinate which has index 6 in the "boxes" array of length 21
# nms algorithm y1 is point 3's Y coordinate which has index 7 in the "boxes" array of length 21
# nms algorithm x2 is point 5's X coordinate which has index 10 in the "boxes" array of length 21
# nms algorithm y2 is point 5's y coordinate which has index 11 in the "boxes" array of length 21
    # With the above choice, we pick index 6, 7, 10 and 11 from the "boxes" array of length 21, for nms
x1 = boxes[:,6]
y1 = boxes[:,7]
x2 = boxes[:,10]
y2 = boxes[:,11]
# print('x1', x1)
# print('y1', y1)
# print('x2', x2)
# print('y2', y2)
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
print('w', w)
print('h', h)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
print('overlap', overlap)
        # delete all indexes from the index list that have an overlap greater than the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
# print('boxes[pick]', boxes[pick])
return boxes[pick].tolist()
def fix_corner_order(corners2D_gt):
corners2D_gt_corrected = np.zeros((9, 2), dtype='float32')
corners2D_gt_corrected[0, :] = corners2D_gt[0, :]
corners2D_gt_corrected[1, :] = corners2D_gt[1, :]
corners2D_gt_corrected[2, :] = corners2D_gt[3, :]
corners2D_gt_corrected[3, :] = corners2D_gt[5, :]
corners2D_gt_corrected[4, :] = corners2D_gt[7, :]
corners2D_gt_corrected[5, :] = corners2D_gt[2, :]
corners2D_gt_corrected[6, :] = corners2D_gt[4, :]
corners2D_gt_corrected[7, :] = corners2D_gt[6, :]
corners2D_gt_corrected[8, :] = corners2D_gt[8, :]
return corners2D_gt_corrected
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
# custom function
@cython.boundscheck(False)
def get_region_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False):
t0minus = time.time()
# Parameters
anchor_dim = 1
#if output.dim() == 3:
#output = output.cpu().numpy()
print('output numpy shape ',output.shape)
    if len(output.shape) == 3:
        output = np.expand_dims(output, 0)  # add a batch dimension for 3-D input
batch = output.shape[0]
assert(output.shape[1] == (19+num_classes)*anchor_dim)
h = output.shape[2]
w = output.shape[3]
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
output = output.reshape(batch*anchor_dim, 19+num_classes, h*w)#.transpose(0,1).ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = np.transpose(output, (1,0,2))
#print('reshaped output numpy has shape ',output.shape)
output = np.ascontiguousarray(output)
#print('reshaped output numpy has shape ',output.shape)
output = output.reshape(19+num_classes, batch*anchor_dim*h*w)
#grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()
temp_x = np.linspace(0, w-1, w)
temp_x = np.tile(temp_x, (h,1))
temp_x = np.tile(temp_x, (batch*anchor_dim, 1, 1))
grid_x = temp_x.reshape(batch*anchor_dim*h*w)
temp_y = np.linspace(0, h-1, h)
temp_y = np.tile(temp_y,(w,1))
temp_y = np.transpose(temp_y, (1,0))
grid_y = np.tile(temp_y, (batch*anchor_dim, 1, 1)).reshape(batch*anchor_dim*h*w)
# define vectorized sigmoid
sigmoid_v = np.vectorize(sigmoid)
xs0 = sigmoid_v(output[0]) + grid_x
ys0 = sigmoid_v(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = sigmoid_v(output[18])
output_transpose = np.transpose(output[19:19+num_classes], (1,0))
cls_confs = softmax(output_transpose)
cls_max_ids = np.argmax(cls_confs, 1)
cls_max_confs = np.amax(cls_confs, 1)
cls_max_confs = cls_max_confs.reshape(-1)
cls_max_ids = cls_max_ids.reshape(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*anchor_dim
# det_confs = convert2cpu(det_confs)
# cls_max_confs = convert2cpu(cls_max_confs)
# cls_max_ids = convert2cpu_long(cls_max_ids)
# xs0 = convert2cpu(xs0)
# ys0 = convert2cpu(ys0)
# xs1 = convert2cpu(xs1)
# ys1 = convert2cpu(ys1)
# xs2 = convert2cpu(xs2)
# ys2 = convert2cpu(ys2)
# xs3 = convert2cpu(xs3)
# ys3 = convert2cpu(ys3)
# xs4 = convert2cpu(xs4)
# ys4 = convert2cpu(ys4)
# xs5 = convert2cpu(xs5)
# ys5 = convert2cpu(ys5)
# xs6 = convert2cpu(xs6)
# ys6 = convert2cpu(ys6)
# xs7 = convert2cpu(xs7)
# ys7 = convert2cpu(ys7)
# xs8 = convert2cpu(xs8)
# ys8 = convert2cpu(ys8)
#if validation:
#cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(anchor_dim):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if conf > max_conf:
max_conf = conf
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
if len(boxes) == 0:
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = cls_max_confs[max_ind]
cls_max_id = cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
all_boxes.append(boxes)
t3 = time.time()
if True:
print('---------------------------------')
print('gpu to cpu for numpy : %f' % (t0-t0minus))
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def get_region_boxes_multi(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
# Parameters
anchor_step = len(anchors)//num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
#output = output.view(batch*anchor_dim, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*anchor_dim*h*w)
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if conf > max_conf:
max_conf = conf
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
if len(boxes) == 0:
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = cls_max_confs[max_ind]
cls_max_id = cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def get_corresponding_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, correspondingclass, only_objectness=1, validation=False):
debug = False
# Parameters
anchor_step = len(anchors)//num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
if debug:
print('output.size(1) ', output.size(1) )
print('(19+num_classes)*num_anchors)', (19+num_classes)*num_anchors)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
max_cls_conf = -100000
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if (det_confs[ind] > max_conf) and (cls_confs[ind, correspondingclass] > max_cls_conf):
max_conf = det_confs[ind]
max_cls_conf = cls_confs[ind, correspondingclass]
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
boxesnp = np.array(boxes)
if (len(boxes) == 0) or (not (correspondingclass in boxesnp[:,20])):
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = max_cls_conf # cls_max_confs[max_ind]
cls_max_id = correspondingclass # cls_max_ids[max_ind]
det_conf = max_conf # det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
# experiment: chris commented out
boxes.append(box)
# print(boxes)
# experiment: chris commented out
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
@jit("float32(float32[:,:])", cache=False, nopython=True, nogil=True, parallel=True)
def esum(z):
    return np.sum(np.exp(z))
import matplotlib.pyplot as plt
import numpy as np
import tempfile
import os
import random
def display_visualization(ycoords, labels=None, marker_size=10):
n = ycoords.shape[0]
if labels is None:
        labels = np.ones(n)
import cvxpy
import numpy as np
class Control:
def __init__(self, model):
# Bind model
self.model = model
# Desired x_pos
self.xd = 0.0
# Control parameters
self.N = 100
# Control limits
self.umax = np.reshape(np.repeat(10, self.N), (self.N, 1))
self.xmax = np.reshape(np.repeat(1, 4 * self.N), (4 * self.N, 1))
# Control parameters
if self.model.name == 'Pendulum':
self.Q = np.mat([[100, 0, 0, 0], [0, 10, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self.R = np.mat(np.identity(1))
self.P = np.mat([[1000, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
else:
self.Q = np.mat([[1.0, 0.0], [0.0, 1.0]])
self.R = np.mat(np.identity(1))
self.P = np.mat([[1.0, 0.0], [0.0, 1.0]])
# Get dynamics
A = np.mat(self.model.A_disc)
B = np.mat(self.model.B_disc)
# Alternative to calculating Abar, Bbar, Cbar, and Ahat
Abar = np.vstack((np.zeros((len(A), self.N*len(A))), np.hstack((np.kron(np.eye(self.N-1), A),
np.zeros((len(A)*(self.N-1), len(A)))))))
Bbar = np.kron(np.eye(self.N), B)
self.Ahat = (np.identity(np.shape(Abar)[0]) - Abar).I * np.kron(np.identity(self.N), A)[:, 0:len(A)]
self.Cbar = (np.identity(np.shape(Abar)[0]) - Abar).I * Bbar
# Calculate penalty matrices
tm1 = np.eye(self.N)
tm1[self.N - 1, self.N - 1] = 0
tm2 = np.zeros((self.N, self.N))
tm2[self.N - 1, self.N - 1] = 1
self.Qbar = np.kron(tm1, self.Q) + np.kron(tm2, self.P)
        self.Rbar = np.kron(np.eye(self.N), self.R)
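# A self-contained sketch (hypothetical 2-state system, not the model above) of the
# lifted prediction matrices built in Control.__init__: stacking x_{k+1} = A x_k + B u_k
# over N steps gives X = Ahat x0 + Cbar U, with Ahat = [A; A^2; ...; A^N].
if __name__ == "__main__":
    import numpy as np
    A = np.array([[1.0, 0.1], [0.0, 1.0]])
    B = np.array([[0.0], [0.1]])
    N, n = 3, 2
    Abar = np.vstack((np.zeros((n, N * n)),
                      np.hstack((np.kron(np.eye(N - 1), A), np.zeros((n * (N - 1), n))))))
    Ahat = np.linalg.inv(np.eye(N * n) - Abar) @ np.kron(np.eye(N), A)[:, 0:n]
    Cbar = np.linalg.inv(np.eye(N * n) - Abar) @ np.kron(np.eye(N), B)
    x0 = np.array([[1.0], [0.0]])
    U = np.zeros((N, 1))
    print((Ahat @ x0 + Cbar @ U).ravel())  # stacked predicted states [x1; x2; x3]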
import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import scipy.optimize as optimize
import scipy.integrate as integrate
import sklearn.linear_model
import kernels
import ep_fast
#import EP_cython
np.set_printoptions(precision=4, linewidth=200)
class GradientFields():
def __init__(self, K_nodiag, s0, t_i, prev):
normPDF = stats.norm(0,1)
try: t_i[0]
except: t_i = np.zeros(K_nodiag.shape[0]) + t_i
#general computations (always the same if the fixed effects are 0!!!!!)
self.Ki = normPDF.sf(t_i)
self.Ps = s0 + (1-s0)*self.Ki
self.Pi = self.Ki / self.Ps
self.stdY = np.sqrt(self.Pi * (1-self.Pi))
#compute Atag0 and B0
self.phi_ti = normPDF.pdf(t_i)
self.phitphit = np.outer(self.phi_ti, self.phi_ti)
self.stdY_mat = np.outer(self.stdY, self.stdY)
mat1_temp = self.phi_ti / self.stdY
self.mat1 = np.outer(mat1_temp, mat1_temp)
sumProbs_temp = np.tile(self.Pi, (K_nodiag.shape[0], 1))
sumProbs = sumProbs_temp + sumProbs_temp.T
Atag0_B0_inner_vec = self.Pi*(1-s0)
self.mat2 = np.outer(Atag0_B0_inner_vec, Atag0_B0_inner_vec) + 1-sumProbs*(1-s0)
self.Atag0 = self.mat1*self.mat2
self.B0 = np.outer(self.Ps, self.Ps)
#Compute the elements of the function value (the squared distance between the observed and expected pairwise phenotypic covariance)
self.K_nodiag_AB0 = K_nodiag * self.Atag0/self.B0
self.K_nodiag_sqr_AB0 = K_nodiag * self.K_nodiag_AB0
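# Minimal sketch (assumed toy values): GradientFields precomputes the liability-
# threshold quantities (Ki, Pi, Atag0, B0 and the kinship-weighted terms) from an
# off-diagonal kinship matrix, baseline rate s0 and per-individual thresholds t_i.
if __name__ == "__main__":
    _K = np.full((4, 4), 0.05)
    np.fill_diagonal(_K, 0.0)
    _gf = GradientFields(_K, s0=0.01, t_i=1.5, prev=0.01)
    print(_gf.Pi[:2], _gf.K_nodiag_sqr_AB0.shape)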
class PrevTest():
def __init__(self, n, m, prev, useFixed, h2Scale=1.0, prng=None, num_generate=None):
self.prng = prng
if (prng is None): self.prng = np.random.RandomState(args.seed)
self.n = n
self.useFixed = useFixed
self.h2Scale = h2Scale
if num_generate is None:
if prev == 0.5:
numGeno = n
else:
numGeno = np.maximum(int(float(self.n)/float(2*prev)), 25000)
else:
numGeno = num_generate
#generate SNPs
mafs = self.prng.rand(m) * 0.45 + 0.05
self.X = prng.binomial(2, mafs, size=(numGeno, m)).astype(np.float)
mafs_estimated = mafs.copy()
self.X_estimated = self.X.copy()
self.X -= 2*mafs
self.X_estimated -= 2*mafs_estimated
self.X /= np.sqrt(2*mafs*(1-mafs))
self.X_estimated /= np.sqrt(2*mafs_estimated*(1-mafs_estimated))
self.m = m
self.n = n
X_mean_diag = np.mean(np.einsum('ij,ij->i', self.X, self.X)) / self.X.shape[1]
X_estimated_mean_diag = np.mean(np.einsum('ij,ij->i', self.X_estimated, self.X_estimated)) / self.X.shape[1]
self.diag_ratio = X_estimated_mean_diag / X_mean_diag
self.prev = prev
#approx coeffs lam_i and c_i for logistic likelihood
self.logistic_c = np.array([1.146480988574439e+02, -1.508871030070582e+03, 2.676085036831241e+03, -1.356294962039222e+03, 7.543285642111850e+01])
self.logistic_lam = np.sqrt(2)*np.array([0.44 ,0.41, 0.40, 0.39, 0.36])
self.logistic_lam2 = self.logistic_lam**2
self.logistic_clam = self.logistic_c * self.logistic_lam
def genData(self, h2, eDist, numFixed, ascertain=True, scaleG=False, extraSNPs=0, fixedVar=0, frac_cases=0.5, kernel='linear', rbf_scale=1.0):
args.seed += 1
self.true_h2 = h2
self.ascertain = ascertain
self.eDist = eDist
if (numFixed==0): fixedVar=0
if (numFixed > 0): assert fixedVar>0
self.fixedVar = fixedVar
self.covars = self.prng.randn(self.X.shape[0], numFixed)
if (eDist == 'normal' and not scaleG): sig2g = h2/(1-h2)
elif (eDist == 'normal' and scaleG): sig2g = h2
elif (eDist == 'logistic' and not scaleG): sig2g = (np.pi**2)/3.0 * h2 / (1 - h2)
elif (eDist == 'logistic' and scaleG): sig2g = h2
else: raise ValueError('unknown e_dist. Valid value are normal, logistic')
if kernel == 'linear':
            self.beta = self.prng.randn(self.m) * np.sqrt(sig2g/self.m)
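# Illustrative sketch (assumed, not the original method body): the class above is
# built around a liability-threshold model, in which a genetic score with variance
# sig2g plus unit-variance noise is thresholded at the quantile implied by the
# prevalence. `simulate_liability_threshold` is a hypothetical, simplified helper.
def simulate_liability_threshold(X, beta, sig2g, prev, prng):
    g = X.dot(beta)                     # genetic liability, variance ~ sig2g
    e = prng.randn(X.shape[0])          # unit-variance environmental noise
    liability = g + e
    t = stats.norm(0, 1).isf(prev) * np.sqrt(sig2g + 1.0)  # threshold matching prev
    y = (liability > t).astype(int)     # 1 = case, 0 = control
    return liability, y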
from . import GeneExpressionDataset
from .anndataset import AnnDatasetFromAnnData, DownloadableAnnDataset
import torch
import pickle
import os
import numpy as np
import pandas as pd
import anndata
class AnnDatasetKeywords(GeneExpressionDataset):
def __init__(self, data, select_genes_keywords=[]):
super().__init__()
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
idx_and_gene_names = [
(idx, gene_name) for idx, gene_name in enumerate(list(anndataset.var.index))
]
for keyword in select_genes_keywords:
idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in idx_and_gene_names
if keyword.lower() in gene_name.lower()
]
gene_indices = np.array([idx for idx, _ in idx_and_gene_names])
gene_names = np.array([gene_name for _, gene_name in idx_and_gene_names])
expression_mat = np.array(anndataset.X[:, gene_indices].todense())
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) > 0.21
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
class ZhengDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
zheng = anndata.read(os.path.join(current_dir, "zheng_gemcode_control.h5ad"))
super(ZhengDataset, self).__init__(zheng, select_genes_keywords=["ercc"])
class MacosDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
macos = anndata.read(os.path.join(current_dir, "macosko_dropseq_control.h5ad"))
super(MacosDataset, self).__init__(macos, select_genes_keywords=["ercc"])
class KleinDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
klein = anndata.read(
os.path.join(current_dir, "klein_indrops_control_GSM1599501.h5ad")
)
super(KleinDataset, self).__init__(klein, select_genes_keywords=["ercc"])
class Sven1Dataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven1 = svens[svens.obs.query('sample == "20311"').index]
super(Sven1Dataset, self).__init__(sven1, select_genes_keywords=["ercc"])
class Sven2Dataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven2 = svens[svens.obs.query('sample == "20312"').index]
super(Sven2Dataset, self).__init__(sven2, select_genes_keywords=["ercc"])
class AnnDatasetRNA(GeneExpressionDataset):
def __init__(self, data, n_genes=100):
super().__init__()
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
# Select RNA genes
idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in enumerate(list(anndataset.var.index))
if "ercc" not in gene_name.lower()
]
gene_indices = np.array([idx for idx, _ in idx_and_gene_names])
gene_names = np.array([gene_name for _, gene_name in idx_and_gene_names])
expression_mat = np.array(anndataset.X[:, gene_indices].todense())
# Find n_genes most expressed genes (wrt average gene expression)
argsort_genes_exp = np.argsort(np.mean(expression_mat, axis=0))
expression_mat = expression_mat[:, argsort_genes_exp[-n_genes:]]
gene_names = gene_names[argsort_genes_exp[-n_genes:]]
# Remove zero cells, then zero genes
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) >= 0.21
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
class KleinDatasetRNA(AnnDatasetRNA):
def __init__(self, n_genes=100):
current_dir = os.path.dirname(os.path.realpath(__file__))
klein = anndata.read(
os.path.join(current_dir, "klein_indrops_control_GSM1599501.h5ad")
)
super(KleinDatasetRNA, self).__init__(klein, n_genes=n_genes)
class Sven1DatasetRNA(AnnDatasetRNA):
def __init__(self, n_genes=100):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven1 = svens[svens.obs.query('sample == "20311"').index]
super(Sven1DatasetRNA, self).__init__(sven1, n_genes=n_genes)
class Sven2DatasetRNA(AnnDatasetRNA):
def __init__(self, n_genes=100):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven2 = svens[svens.obs.query('sample == "20312"').index]
super(Sven2DatasetRNA, self).__init__(sven2, n_genes=n_genes)
class AnnDatasetMixed(GeneExpressionDataset):
def __init__(self, data, matching_func="l2", n_matches=3, threshold=0.01):
super().__init__()
assert matching_func in [
"l2",
"l2_sort",
"means",
"cosine",
"cosine_sort",
"random",
]
self.matching_func = matching_func
self.n_matches = n_matches
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
expression_mat = np.array(anndataset.X.todense())
# Select ERCC genes
ercc_idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in enumerate(list(anndataset.var.index))
if "ercc" in gene_name.lower()
]
# Eliminate zero cells and zero genes
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = expression_mat.sum(axis=0) > 0
expression_mat = expression_mat[:, select_genes]
# Select ERCC genes
gene_names = np.array(
[
gene_name
for idx, gene_name in enumerate(list(anndataset.var.index))
if select_genes[idx]
]
)
ercc_gene_indices = np.array(
[
idx
for idx, gene_name in enumerate(gene_names)
if "ercc" in gene_name.lower()
]
)
# Match ERCC genes with RNA genes, select matched genes
selected_matched_genes = self._match_genes(expression_mat, ercc_gene_indices)
expression_mat = expression_mat[:, selected_matched_genes]
gene_names = gene_names[selected_matched_genes]
# Remove induced zero cells and keep only genes present in at least 21% of cells
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) >= threshold
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
print(
"ERCC genes :",
len([gene_name for gene_name in gene_names if "ercc" in gene_name.lower()]),
)
self.is_ercc = np.array(
["ercc" in gene_name.lower() for gene_name in gene_names]
)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
def _matching_func(self, ref_col, mat):
if self.matching_func == "l2":
return np.linalg.norm(mat - ref_col, axis=0)
elif self.matching_func == "l2_sort":
return np.linalg.norm(
np.sort(mat, axis=0) - np.sort(ref_col, axis=0), axis=0
)
elif self.matching_func == "means":
return np.abs(np.mean(mat, axis=0) - np.mean(ref_col))
elif self.matching_func == "cosine":
return 1.0 - np.sum(mat * ref_col, axis=0) / (
np.linalg.norm(mat, axis=0) * np.linalg.norm(ref_col)
)
elif self.matching_func == "cosine_sort":
return 1.0 - np.sum(
np.sort(mat, axis=0) * np.sort(ref_col, axis=0), axis=0
) / (np.linalg.norm(mat, axis=0) * np.linalg.norm(ref_col))
elif self.matching_func == "random":
np.random.seed(0)
return np.random.uniform(0.0, 100.0, size=(mat.shape[1],))
else:
raise Exception("Matching function not recognized")
def _match_given_gene(self, expression_mat, ref_gene_index, selected_genes):
scores = self._matching_func(
expression_mat[:, ref_gene_index][:, np.newaxis], expression_mat
)
scores[selected_genes] = np.inf
new_matches = np.arange(expression_mat.shape[1])[
np.argsort(scores)[: self.n_matches]
]
selected_genes[new_matches] = True
return selected_genes
def _match_genes(self, expression_mat, ref_gene_indices):
        selected_genes = np.zeros(shape=(expression_mat.shape[1],), dtype=bool)
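        # Assumed completion (the original loop is cut off here): greedily match
        # every ERCC reference gene using the helper defined above.
        for ref_gene_index in ref_gene_indices:
            selected_genes = self._match_given_gene(
                expression_mat, ref_gene_index, selected_genes
            )
        return selected_genes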
# Test that all of the examples in the galpy paper run
from __future__ import print_function, division
import os
import numpy
import pytest
def test_overview():
from galpy.potential import NFWPotential
np= NFWPotential(normalize=1.)
from galpy.orbit import Orbit
o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.])
from galpy.actionAngle import actionAngleSpherical
aA= actionAngleSpherical(pot=np)
js= aA(o)
assert numpy.fabs((js[0]-0.00980542)/js[0]) < 10.**-3., 'Action calculation in the overview section has changed'
assert numpy.fabs((js[1]-1.1)/js[0]) < 10.**-3., 'Action calculation in the overview section has changed'
assert numpy.fabs((js[2]-0.00553155)/js[0]) < 10.**-3., 'Action calculation in the overview section has changed'
from galpy.df import quasiisothermaldf
qdf= quasiisothermaldf(1./3.,0.2,0.1,1.,1.,
pot=np,aA=aA)
assert numpy.fabs((qdf(o)-61.57476085)/61.57476085) < 10.**-3., 'qdf calculation in the overview section has changed'
return None
def test_import():
import galpy
import galpy.potential
import galpy.orbit
import galpy.actionAngle
import galpy.df
import galpy.util
return None
def test_units():
import galpy.util.bovy_conversion as conversion
print(conversion.force_in_pcMyr2(220.,8.))#pc/Myr^2
assert numpy.fabs(conversion.force_in_pcMyr2(220.,8.)-6.32793804994) < 10.**-4., 'unit conversion has changed'
print(conversion.dens_in_msolpc3(220.,8.))#Msolar/pc^3
# Loosen tolerances including mass bc of 0.025% change in Msun in astropyv2
assert numpy.fabs((conversion.dens_in_msolpc3(220.,8.)-0.175790330079)/0.175790330079) < 0.0003, 'unit conversion has changed'
print(conversion.surfdens_in_msolpc2(220.,8.))#Msolar/pc^2
assert numpy.fabs((conversion.surfdens_in_msolpc2(220.,8.)-1406.32264063)/1406.32264063) < 0.0003, 'unit conversion has changed'
print(conversion.mass_in_1010msol(220.,8.))#10^10 Msolar
assert numpy.fabs((conversion.mass_in_1010msol(220.,8.)-9.00046490005)/9.00046490005) < 0.0003, 'unit conversion has changed'
print(conversion.freq_in_Gyr(220.,8.))#1/Gyr
assert numpy.fabs(conversion.freq_in_Gyr(220.,8.)-28.1245845523) < 10.**-4., 'unit conversion has changed'
print(conversion.time_in_Gyr(220.,8.))#Gyr
assert numpy.fabs(conversion.time_in_Gyr(220.,8.)-0.0355560807712) < 10.**-4., 'unit conversion has changed'
return None
def test_potmethods():
from galpy.potential import DoubleExponentialDiskPotential
dp= DoubleExponentialDiskPotential(normalize=1.,
hr=3./8.,hz=0.3/8.)
dp(1.,0.1) # The potential itself at R=1., z=0.1
assert numpy.fabs(dp(1.,0.1)+1.1037196286636572) < 10.**-4., 'potmethods has changed'
dp.Rforce(1.,0.1) # The radial force
assert numpy.fabs(dp.Rforce(1.,0.1)+0.9147659436328015) < 10.**-4., 'potmethods has changed'
dp.zforce(1.,0.1) # The vertical force
assert numpy.fabs(dp.zforce(1.,0.1)+0.50056789703079607) < 10.**-4., 'potmethods has changed'
dp.R2deriv(1.,0.1) # The second radial derivative
assert numpy.fabs(dp.R2deriv(1.,0.1)+1.0189440730205248) < 10.**-4., 'potmethods has changed'
dp.z2deriv(1.,0.1) # The second vertical derivative
assert numpy.fabs(dp.z2deriv(1.,0.1)-1.0648350937842703) < 10.**-4., 'potmethods has changed'
dp.Rzderiv(1.,0.1) # The mixed radial,vertical derivative
assert numpy.fabs(dp.Rzderiv(1.,0.1)+1.1872449759212851) < 10.**-4., 'potmethods has changed'
dp.dens(1.,0.1) # The density
assert numpy.fabs(dp.dens(1.,0.1)-0.076502355610946121) < 10.**-4., 'potmethods has changed'
dp.dens(1.,0.1,forcepoisson=True) # Using Poisson's eqn.
assert numpy.fabs(dp.dens(1.,0.1,forcepoisson=True)-0.076446652249682681) < 10.**-4., 'potmethods has changed'
dp.mass(1.,0.1) # The mass
assert numpy.fabs(dp.mass(1.,0.1)-0.7281629803939751) < 10.**-4., 'potmethods has changed'
dp.vcirc(1.) # The circular velocity at R=1.
assert numpy.fabs(dp.vcirc(1.)-1.0) < 10.**-4., 'potmethods has changed' # By definition, because of normalize=1.
dp.omegac(1.) # The rotational frequency
assert numpy.fabs(dp.omegac(1.)-1.0) < 10.**-4., 'potmethods has changed' # Also because of normalize=1.
dp.epifreq(1.) # The epicycle frequency
assert numpy.fabs(dp.epifreq(1.)-1.3301123099210266) < 10.**-4., 'potmethods has changed'
dp.verticalfreq(1.) # The vertical frequency
assert numpy.fabs(dp.verticalfreq(1.)-3.7510872575640293) < 10.**-4., 'potmethods has changed'
dp.flattening(1.,0.1) #The flattening (see caption)
assert numpy.fabs(dp.flattening(1.,0.1)-0.42748757564198159) < 10.**-4., 'potmethods has changed'
dp.lindbladR(1.75,m='corotation') # co-rotation resonance
assert numpy.fabs(dp.lindbladR(1.75,m='corotation')-0.540985051273488) < 10.**-4., 'potmethods has changed'
return None
from galpy.potential import Potential
def smoothInterp(t,dt,tform):
"""Smooth interpolation in time, following Dehnen (2000)"""
if t < tform: smooth= 0.
elif t > (tform+dt): smooth= 1.
else:
xi= 2.*(t-tform)/dt-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
return smooth
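# Small illustrative check (not from the original test suite): the Dehnen (2000)
# smoothing ramps from 0 to 1 over [tform, tform+dt] with zero slope at both ends.
def _example_smoothInterp():
    assert smoothInterp(0., 100., 50.) == 0. # before tform
    assert smoothInterp(200., 100., 50.) == 1. # after tform+dt
    assert numpy.fabs(smoothInterp(100., 100., 50.)-0.5) < 10.**-10. # halfway through the switch-on
    return None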
class TimeInterpPotential(Potential):
"""Potential that smoothly interpolates in time between two static potentials"""
def __init__(self,pot1,pot2,dt=100.,tform=50.):
"""pot1= potential for t < tform, pot2= potential for t > tform+dt, dt: time over which to turn on pot2,
tform: time at which the interpolation is switched on"""
Potential.__init__(self,amp=1.)
self._pot1= pot1
self._pot2= pot2
self._tform= tform
self._dt= dt
return None
def _Rforce(self,R,z,phi=0.,t=0.):
smooth= smoothInterp(t,self._dt,self._tform)
return (1.-smooth)*self._pot1.Rforce(R,z)+smooth*self._pot2.Rforce(R,z)
def _zforce(self,R,z,phi=0.,t=0.):
smooth= smoothInterp(t,self._dt,self._tform)
return (1.-smooth)*self._pot1.zforce(R,z)+smooth*self._pot2.zforce(R,z)
def test_TimeInterpPotential():
#Just to check that the code above has run properly
from galpy.potential import LogarithmicHaloPotential, \
MiyamotoNagaiPotential
lp= LogarithmicHaloPotential(normalize=1.)
mp= MiyamotoNagaiPotential(normalize=1.)
tip= TimeInterpPotential(lp,mp)
assert numpy.fabs(tip.Rforce(1.,0.1,t=10.)-lp.Rforce(1.,0.1)) < 10.**-8., 'TimeInterPotential does not work as expected'
assert numpy.fabs(tip.Rforce(1.,0.1,t=200.)-mp.Rforce(1.,0.1)) < 10.**-8., 'TimeInterPotential does not work as expected'
return None
@pytest.mark.skip(reason="Test does not work correctly")
def test_potentialAPIChange_warning():
# Test that a warning is displayed about the API change for evaluatePotentials etc. functions from what is given in the galpy paper
#Turn warnings into errors to test for them
import warnings
from galpy.util import galpyWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
import galpy.potential
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "A major change in versions > 1.1 is that all galpy.potential functions and methods take the potential as the first argument; previously methods such as evaluatePotentials, evaluateDensities, etc. would be called with (R,z,Pot), now they are called as (Pot,R,z) for greater consistency across the codebase")
if raisedWarning: break
assert raisedWarning, "Importing galpy.potential does not raise warning about evaluatePotentials API change"
return None
def test_orbitint():
import numpy
from galpy.potential import MWPotential2014
from galpy.potential import evaluatePotentials as evalPot
from galpy.orbit import Orbit
E, Lz= -1.25, 0.6
o1= Orbit([0.8,0.,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.)),0.])
ts= numpy.linspace(0.,100.,2001)
o1.integrate(ts,MWPotential2014)
o1.plot(xrange=[0.3,1.],yrange=[-0.2,0.2],color='k')
o2= Orbit([0.8,0.3,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.-0.3**2./2.)),0.])
o2.integrate(ts,MWPotential2014)
o2.plot(xrange=[0.3,1.],yrange=[-0.2,0.2],color='k')
return None
def test_orbmethods():
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014
o= Orbit([0.8,0.3,0.75,0.,0.2,0.]) # setup R,vR,vT,z,vz,phi
times= numpy.linspace(0.,10.,1001) # Output times
o.integrate(times,MWPotential2014) # Integrate
o.E() # Energy
assert numpy.fabs(o.E()+1.2547650648697966) < 10.**-5., 'Orbit method does not work as expected'
o.L() # Angular momentum
assert numpy.all(numpy.fabs(o.L()-numpy.array([[ 0. , -0.16, 0.6 ]])) < 10.**-5.), 'Orbit method does not work as expected'
o.Jacobi(OmegaP=0.65) #Jacobi integral E-OmegaP Lz
assert numpy.fabs(o.Jacobi(OmegaP=0.65)-numpy.array([-1.64476506])) < 10.**-5., 'Orbit method does not work as expected'
o.ER(times[-1]), o.Ez(times[-1]) # Rad. and vert. E at end
assert numpy.fabs(o.ER(times[-1])+1.27601734263047) < 10.**-5., 'Orbit method does not work as expected'
assert numpy.fabs(o.Ez(times[-1])-0.021252201847851909) < 10.**-5., 'Orbit method does not work as expected'
o.rperi(), o.rap(), o.zmax() # Peri-/apocenter r, max. |z|
assert numpy.fabs(o.rperi()-0.44231993168097) < 10.**-5., 'Orbit method does not work as expected'
assert numpy.fabs(o.rap()-0.87769030382105) < 10.**-5., 'Orbit method does not work as expected'
assert numpy.fabs(o.zmax()-0.077452357289016) < 10.**-5., 'Orbit method does not work as expected'
o.e() # eccentricity (rap-rperi)/(rap+rperi)
assert numpy.fabs(o.e()-0.32982348199330563) < 10.**-5., 'Orbit method does not work as expected'
o.R(2.,ro=8.) # Cylindrical radius at time 2. in kpc
assert numpy.fabs(o.R(2.,ro=8.)-3.5470772876920007) < 10.**-3., 'Orbit method does not work as expected'
o.vR(5.,vo=220.) # Cyl. rad. velocity at time 5. in km/s
assert numpy.fabs(o.vR(5.,vo=220.)-45.202530965094553) < 10.**-3., 'Orbit method does not work as expected'
o.ra(1.), o.dec(1.) # RA and Dec at t=1. (default settings)
# 5/12/2016: test weakened, because improved galcen<->heliocen
# transformation has changed these, but still close
assert numpy.fabs(o.ra(1.)-numpy.array([ 288.19277])) < 10.**-1., 'Orbit method does not work as expected'
assert numpy.fabs(o.dec(1.)-numpy.array([ 18.98069155])) < 10.**-1., 'Orbit method does not work as expected'
o.jr(type='adiabatic'), o.jz() # R/z actions (ad. approx.)
assert numpy.fabs(o.jr(type='adiabatic')-0.05285302231137586) < 10.**-3., 'Orbit method does not work as expected'
assert numpy.fabs(o.jz()-0.006637988850751242) < 10.**-3., 'Orbit method does not work as expected'
# Rad. period w/ Staeckel approximation w/ focal length 0.5,
o.Tr(type='staeckel',delta=0.5,ro=8.,vo=220.) # in Gyr
assert numpy.fabs(o.Tr(type='staeckel',delta=0.5,ro=8.,vo=220.)-0.1039467864018446) < 10.**-3., 'Orbit method does not work as expected'
o.plot(d1='R',d2='z') # Plot the orbit in (R,z)
o.plot3d() # Plot the orbit in 3D, w/ default [x,y,z]
return None
def test_orbsetup():
from galpy.orbit import Orbit
o= Orbit([25.,10.,2.,5.,-2.,50.],radec=True,ro=8.,
vo=220.,solarmotion=[-11.1,25.,7.25])
return None
def test_surfacesection():
#Preliminary code
import numpy
from galpy.potential import MWPotential2014
from galpy.potential import evaluatePotentials as evalPot
from galpy.orbit import Orbit
E, Lz= -1.25, 0.6
o1= Orbit([0.8,0.,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.)),0.])
ts= numpy.linspace(0.,100.,2001)
o1.integrate(ts,MWPotential2014)
o2= Orbit([0.8,0.3,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.-0.3**2./2.)),0.])
o2.integrate(ts,MWPotential2014)
def surface_section(Rs,zs,vRs):
# Find points where the orbit crosses z from - to +
shiftzs= numpy.roll(zs,-1)
indx= (zs[:-1] < 0.)*(shiftzs[:-1] > 0.)
return (Rs[:-1][indx],vRs[:-1][indx])
# Calculate and plot the surface of section
ts= numpy.linspace(0.,1000.,20001) # long integration
o1.integrate(ts,MWPotential2014)
o2.integrate(ts,MWPotential2014)
sect1Rs,sect1vRs=surface_section(o1.R(ts),o1.z(ts),o1.vR(ts))
sect2Rs,sect2vRs=surface_section(o2.R(ts),o2.z(ts),o2.vR(ts))
from matplotlib.pyplot import plot, xlim, ylim
plot(sect1Rs,sect1vRs,'bo',mec='none')
xlim(0.3,1.); ylim(-0.69,0.69)
plot(sect2Rs,sect2vRs,'yo',mec='none')
return None
def test_adinvariance():
from galpy.potential import IsochronePotential
from galpy.orbit import Orbit
from galpy.actionAngle import actionAngleIsochrone
# Initialize two different IsochronePotentials
ip1= IsochronePotential(normalize=1.,b=1.)
ip2= IsochronePotential(normalize=0.5,b=1.)
# Use TimeInterpPotential to interpolate smoothly
tip= TimeInterpPotential(ip1,ip2,dt=100.,tform=50.)
# Integrate: 1) Orbit in the first isochrone potential
o1= Orbit([1.,0.1,1.1,0.0,0.1,0.])
ts= numpy.linspace(0.,50.,1001)
o1.integrate(ts,tip)
o1.plot(d1='x',d2='y',xrange=[-1.6,1.6],yrange=[-1.6,1.6],
color='b')
# 2) Orbit in the transition
o2= o1(ts[-1]) # Last time step => initial time step
ts2= numpy.linspace(50.,150.,1001)
o2.integrate(ts2,tip)
o2.plot(d1='x',d2='y',overplot=True,color='g')
# 3) Orbit in the second isochrone potential
o3= o2(ts2[-1])
ts3= numpy.linspace(150.,200.,1001)
o3.integrate(ts3,tip)
o3.plot(d1='x',d2='y',overplot=True,color='r')
# Now we calculate energy, maximum height, and mean radius
print(o1.E(pot=ip1), (o1.rperi()+o1.rap())/2, o1.zmax())
assert numpy.fabs(o1.E(pot=ip1)+2.79921356237) < 10.**-4., 'Energy in the adiabatic invariance test is different'
assert numpy.fabs((o1.rperi()+o1.rap())/2-1.07854158141) < 10.**-4., 'mean radius in the adiabatic invariance test is different'
assert numpy.fabs(o1.zmax()-0.106331362938) < 10.**-4., 'zmax in the adiabatic invariance test is different'
print(o3.E(pot=ip2), (o3.rperi()+o3.rap())/2, o3.zmax())
assert numpy.fabs(o3.E(pot=ip2)+1.19677002624) < 10.**-4., 'Energy in the adiabatic invariance test is different'
assert numpy.fabs((o3.rperi()+o3.rap())/2-1.39962036137) < 10.**-4., 'mean radius in the adiabatic invariance test is different'
assert numpy.fabs(o3.zmax()-0.138364269321) < 10.**-4., 'zmax in the adiabatic invariance test is different'
# The orbit has clearly moved to larger radii,
# the actions are however conserved from beginning to end
aAI1= actionAngleIsochrone(ip=ip1); print(aAI1(o1))
js= aAI1(o1)
assert numpy.fabs(js[0]-numpy.array([ 0.00773779])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[1]-numpy.array([ 1.1])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[2]-numpy.array([ 0.0045361])) < 10.**-4., 'action in the adiabatic invariance test is different'
aAI2= actionAngleIsochrone(ip=ip2); print(aAI2(o3))
js= aAI2(o3)
assert numpy.fabs(js[0]-numpy.array([ 0.00773812])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[1]-numpy.array([ 1.1])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[2]-numpy.array([ 0.0045361])) < 10.**-4., 'action in the adiabatic invariance test is different'
return None
def test_diskdf():
from galpy.df import dehnendf
# Init. dehnendf w/ flat rot., hr=1/3, hs=1, and sr(1)=0.2
df= dehnendf(beta=0.,profileParams=(1./3.,1.0,0.2))
# Same, w/ correction factors to scale profiles
dfc= dehnendf(beta=0.,profileParams=(1./3.,1.0,0.2),
correct=True,niter=20)
if True:
# Log. diff. between scale and DF surf. dens.
numpy.log(df.surfacemass(0.5)/df.targetSurfacemass(0.5))
assert numpy.fabs(numpy.log(df.surfacemass(0.5)/df.targetSurfacemass(0.5))+0.056954077791649592) < 10.**-4., 'diskdf does not behave as expected'
# Same for corrected DF
numpy.log(dfc.surfacemass(0.5)/dfc.targetSurfacemass(0.5))
assert numpy.fabs(numpy.log(dfc.surfacemass(0.5)/dfc.targetSurfacemass(0.5))+4.1440377205802041e-06) < 10.**-4., 'diskdf does not behave as expected'
# Log. diff between scale and DF sr
numpy.log(df.sigmaR2(0.5)/df.targetSigma2(0.5))
assert numpy.fabs(numpy.log(df.sigmaR2(0.5)/df.targetSigma2(0.5))+0.12786083001363127) < 10.**-4., 'diskdf does not behave as expected'
# Same for corrected DF
numpy.log(dfc.sigmaR2(0.5)/dfc.targetSigma2(0.5))
assert numpy.fabs(numpy.log(dfc.sigmaR2(0.5)/dfc.targetSigma2(0.5))+6.8065001252214986e-06) < 10.**-4., 'diskdf does not behave as expected'
# Evaluate DF w/ R,vR,vT
df(numpy.array([0.9,0.1,0.8]))
assert numpy.fabs(df(numpy.array([0.9,0.1,0.8]))-numpy.array(0.1740247246180417)) < 10.**-4., 'diskdf does not behave as expected'
# Evaluate corrected DF w/ Orbit instance
from galpy.orbit import Orbit
dfc(Orbit([0.9,0.1,0.8]))
assert numpy.fabs(dfc(Orbit([0.9,0.1,0.8]))-numpy.array(0.16834863725552207)) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the mean velocities
df.meanvR(0.9), df.meanvT(0.9)
assert numpy.fabs(df.meanvR(0.9)) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(df.meanvT(0.9)-0.91144428051168291) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the velocity dispersions
numpy.sqrt(dfc.sigmaR2(0.9)), numpy.sqrt(dfc.sigmaT2(0.9))
assert numpy.fabs(numpy.sqrt(dfc.sigmaR2(0.9))-0.22103383792719539) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(numpy.sqrt(dfc.sigmaT2(0.9))-0.17613725303902811) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the skew of the velocity distribution
df.skewvR(0.9), df.skewvT(0.9)
assert numpy.fabs(df.skewvR(0.9)) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(df.skewvT(0.9)+0.47331638366025863) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the kurtosis of the velocity distribution
df.kurtosisvR(0.9), df.kurtosisvT(0.9)
assert numpy.fabs(df.kurtosisvR(0.9)+0.13561300880237059) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(df.kurtosisvT(0.9)-0.12612702099300721) < 10.**-4., 'diskdf does not behave as expected'
# Calculate a higher-order moment of the velocity DF
df.vmomentsurfacemass(1.,6.,2.)/df.surfacemass(1.)
assert numpy.fabs(df.vmomentsurfacemass(1.,6.,2.)/df.surfacemass(1.)-0.00048953492205559054) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the Oort functions
dfc.oortA(1.), dfc.oortB(1.), dfc.oortC(1.), dfc.oortK(1.)
assert numpy.fabs(dfc.oortA(1.)-0.40958989067012197) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(dfc.oortB(1.)+0.49396172114486514) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(dfc.oortC(1.)) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(dfc.oortK(1.)) < 10.**-4., 'diskdf does not behave as expected'
# Sample Orbits from the DF, returns list of Orbits
numpy.random.seed(1)
os= dfc.sample(n=100,returnOrbit=True,nphi=1)
# check that these have the right mean radius = 2hr=2/3
rs= numpy.array([o.R() for o in os])
        assert numpy.fabs(numpy.mean(rs)-2./3.) < 0.1, 'diskdf does not behave as expected'
'''
This file describes our re-implementation of the best model proposed by Bisk et al. in their work "Natural Language Communication with Robots." All our advice models are built on top of this model.
The code flows top down and the parameters are at the top.
'''
from absl import flags
import os,random,sys
sys.path.append(".")
## Model Imports
import tensorflow as tf
tf.set_random_seed(20160905)
random.seed(20160427)
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
import pandas as pandas
import math
import time
import argparse
from TFLibraries.Layer import Layers
Layer = Layers()
# these are the main parameters you should edit
flags.DEFINE_integer("target", default=1, help="FLAGS.target is 1 if source coordinate prediction, else 2 if target prediction")
flags.DEFINE_string("model_save_path", default='savedModels/default_model/model.ckpt', help="Where to save the trained model.")
flags.DEFINE_string("train_file", default='data/STxyz_Blank/Train.mat', help='Where the training data mat file is located.')
flags.DEFINE_string("dev_file", default='data/STxyz_Blank/Dev.mat', help='Where the dev data mat file is located.')
flags.DEFINE_string("test_file", default='data/STxyz_Blank/Test.mat', help='Where the test data mat file is located.')
# these parameters can be left as default to train the model and achieve the performance we report
flags.DEFINE_integer("hidden_layer_size", default=256, help="hidden layer size for the LSTM and FC Layers.")
flags.DEFINE_integer("word_embedding_size", default=256, help="Size of the word embedding layer.")
flags.DEFINE_integer("epochs", default=60, help="How many FLAGS.epochs to run.")
flags.DEFINE_float("learning_rate", default=0.001, help="Learning rate. No decay implemented.")
flags.DEFINE_float("gradient_clip_threshold", default=5.0, help="When to do gradient clipping.")
flags.DEFINE_integer("maxlength", default=105, help="Maximum sentence length")
flags.DEFINE_integer("world_length", default=20, help="Length of the world (grid) array")
flags.DEFINE_integer("world_size", default=3, help="Width of the world (grid) array")
flags.DEFINE_integer("batch_size", default=9, help="How many examples per batch. Set to 9 because there are 9 different sentences per world configuration.")
flags.DEFINE_integer("ndirs", default=9, help="Dimension for the number of directions prediction FC layer.")
flags.DEFINE_integer("nblocks", default=20, help="Dimensions for the number of blocks prediction FC layer.")
flags.DEFINE_integer("ndims", default=3, help="How many output dimensions. Should be 3 due to 3 coordinates.")
flags.DEFINE_bool("performing_analysis", default=False, help="Whether we are performing analysis or not. If true, don't save the model and don't run training.")
FLAGS = flags.FLAGS
FLAGS(sys.argv)
def load_data(train_file, dev_file, test_file):
'''Load the data given the input files'''
# dictionaries to store all the data
training = {}
training_labels = {}
training_lens = {}
development = {}
development_labels = {}
development_lens = {}
testing = {}
testing_labels = {}
testing_lens = {}
# if FLAGS.target = 1, range = 1 to 3
range_start = 0
range_end = 3
if FLAGS.target == 2:
range_start = 3
range_end = 6
xvocab = 0
# Load the Data
print("Reading the data files...")
# A minibatch consists of a FLAGS.target location, a world, and 9 sentences that share the same FLAGS.target/world.
fileList = [FLAGS.train_file, FLAGS.dev_file, FLAGS.test_file]
# this list will hold train, dev, test files and all of their individual minibatches
all_data = []
for k in range(0, 3):
df = pandas.DataFrame([line.strip().split() for line in open(fileList[k], 'r')])
df.fillna("", inplace=True)
data = np.array(df)
# A minibatch consists of a FLAGS.target location, a world,
# and 9 sentences that share the same FLAGS.target/world.
minibatches = []
# go from 1 to d in steps of 9
# 11870
for i1 in range(0, data.shape[0] - 1, 9):
# go through each individual one
FLAGS.target = np.reshape(np.asarray(data[i1, range_start:range_end], dtype=np.float), (3, 1))
world = np.reshape(np.asarray(data[i1,6:66], dtype=np.float), (3,20), order='F')
sentences = []
for i in range(i1, i1+9):
# get in batches of 60
#@assert FLAGS.target == np.reshape(np.asarray(data[i1, range_start:range_end], dtype=np.float32), (3, 1))
# @assert world == np.reshape(np.asarray(data[i1,6:66]), (3,20))
sent = []
# maybe this is 67
for j in range (66, len(data[2])):
# if "", break, else add to the sentence
if data[i, j] == "":
break
sent.append(data[i,j])
if int(data[i, j]) > xvocab:
if fileList[k] == FLAGS.train_file:
xvocab = int(data[i, j])
sentences.append(sent)
minibatches.append((FLAGS.target, world, sentences))
all_data.append(minibatches)
return all_data, xvocab
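# Illustrative helper (assumed, not in the original file): each minibatch returned
# by load_data() is a (target, world, sentences) tuple with a (3, 1) coordinate, a
# (3, 20) world of block positions, and 9 token-id sentences sharing that world.
def describe_minibatch(minibatch):
    target, world, sentences = minibatch
    print("target:", target.shape, "world:", world.shape,
          "sentences:", len(sentences), "max length:", max(len(s) for s in sentences))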
print("Initializing the model...")
# load the data
all_data, xvocab = load_data(FLAGS.train_file, FLAGS.dev_file, FLAGS.test_file)
lastloss = bestloss = sys.maxsize
# train is all_data[0], dev is all_data[1], test is all_data[2]
# FLAGS.maxlength = 83 since we can possible have an 83 word sentence and must pass in one by one
input_data = tf.placeholder(tf.int32, [FLAGS.batch_size, FLAGS.maxlength])
# store how many sequences to go until we have to update the gradients
lengths = tf.placeholder(tf.int32, [FLAGS.batch_size])
# correct outputs is a matrix of the correct x,y,z coordinates for each element in the batch
labels = tf.placeholder(tf.float32, [3, FLAGS.batch_size])
with tf.name_scope("dropout_placeholer"):
dropout_prob_placeholder = tf.placeholder_with_default(1.0, shape=())
embeddings = tf.Variable(tf.random_uniform([xvocab, FLAGS.word_embedding_size], -1, 1, seed=20160503))
# RNN architecture
multicells = 1
# the lstm cell
lstm = tf.contrib.rnn.LSTMCell(FLAGS.hidden_layer_size, state_is_tuple=True, initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
# dropout cell
lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=dropout_prob_placeholder)
# create a cell of the LSTM cell
lstm = tf.contrib.rnn.MultiRNNCell(cells=[lstm] * multicells, state_is_tuple=True)
# we have two softmax, one of size FLAGS.nblocks, another of size FLAGS.ndirs
output_layer = {}
with tf.name_scope("output-20-weight"):
output_layer[0] = Layer.W(1 * FLAGS.hidden_layer_size, FLAGS.nblocks, 'OutputLayer')
with tf.name_scope("output-10-weight"):
output_layer[1] = Layer.W(1 * FLAGS.hidden_layer_size, FLAGS.ndirs, 'OutputLayer2')
# # add bias to them, not sure if needed
output_bias = {}
with tf.name_scope("output-20-bias"):
output_bias[0] = Layer.b(FLAGS.nblocks, 'OutputBias')
with tf.name_scope("output-10-bias"):
output_bias[1] = Layer.b(FLAGS.ndirs, 'OutputBias2')
# inputs
rnn_inputs = tf.nn.embedding_lookup(embeddings, input_data)
# make the RNN graph
with tf.variable_scope("lstm0"):
# create the rnn graph at run time
# sequence length allows us to input variable lengths
# tensorflow returns zero vectors for states and outputs only after the sequence length.
outputs, fstate = tf.nn.dynamic_rnn(cell=lstm, inputs=rnn_inputs,
sequence_length=lengths,
dtype=tf.float32, time_major=False)
logits = {}
with tf.name_scope("output1-20-compute"):
logits[0] = tf.matmul((fstate[0].h), output_layer[0]) + output_bias[0]
with tf.name_scope("output2-9-compute"):
logits[1] = tf.matmul((fstate[0].h), output_layer[1]) + output_bias[1]
# FLAGS.nblocks output
with tf.name_scope("softmax-20"):
refblock = tf.nn.softmax(logits[0])
# FLAGS.ndirs output
with tf.name_scope("softmax-9"):
direction = tf.nn.softmax(logits[1])
world_placeholder = tf.placeholder(tf.float32, [FLAGS.world_size, FLAGS.world_length])
# multiply the world by the softmax output of size FLAGS.nblocks (20)
refxyz = tf.matmul(world_placeholder, tf.transpose(refblock))
with tf.name_scope("Weights_9_to_3_Dims"):
output_dimensions = Layer.W(FLAGS.ndirs, FLAGS.ndims, name='OffsetWeights')
offset = tf.matmul(direction, output_dimensions)
# add these results together to get a matrix of size (3, 9)
with tf.name_scope("resulting_coordinate"):
result = refxyz + tf.transpose(offset)
# Learning
with tf.name_scope("regular_optimizer"):
loss = tf.reduce_mean(tf.squared_difference(result, labels))
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clip_threshold)
optimize = optimizer.apply_gradients(zip(gradients, variables))
with tf.name_scope("correct_prediction"):
# distance from the coordinates normalized by the block length
correct_prediction = tf.reduce_sum([(tf.sqrt(tf.reduce_sum([(result[:, j][i] - labels[:, j][i])**2 for i in range(3) ]))/0.1524) for j in range(FLAGS.batch_size)])/FLAGS.batch_size
output_1 = ((tf.sqrt(tf.reduce_sum([(result[:, 0][i] - labels[:, 0][i])**2 for i in range(3) ]))/0.1524))
output_2 = ((tf.sqrt(tf.reduce_sum([(result[:, 1][i] - labels[:, 1][i])**2 for i in range(3) ]))/0.1524))
output_3 = ((tf.sqrt(tf.reduce_sum([(result[:, 2][i] - labels[:, 2][i])**2 for i in range(3) ]))/0.1524))
output_4 = ((tf.sqrt(tf.reduce_sum([(result[:, 3][i] - labels[:, 3][i])**2 for i in range(3) ]))/0.1524))
output_5 = ((tf.sqrt(tf.reduce_sum([(result[:, 4][i] - labels[:, 4][i])**2 for i in range(3) ]))/0.1524))
output_6 = ((tf.sqrt(tf.reduce_sum([(result[:, 5][i] - labels[:, 5][i])**2 for i in range(3) ]))/0.1524))
output_7 = ((tf.sqrt(tf.reduce_sum([(result[:, 6][i] - labels[:, 6][i])**2 for i in range(3) ]))/0.1524))
output_8 = ((tf.sqrt(tf.reduce_sum([(result[:, 7][i] - labels[:, 7][i])**2 for i in range(3) ]))/0.1524))
output_9 = ((tf.sqrt(tf.reduce_sum([(result[:, 8][i] - labels[:, 8][i])**2 for i in range(3) ]))/0.1524))
with tf.name_scope("actual_prediction"):
actual_prediction = result[:, 1]
## Training
saver = tf.train.Saver()
session = tf.Session()
session.run(tf.global_variables_initializer())
#saver.restore(session, FLAGS.model_save_path)
# train one set of minibatches
def train_test_model(sess, minibatches, batchsize, training=True):
''' Train the model if training=True, test otherwise.
Parameters:
sess:
Current tensorflow session.
minibatches:
Minibatches of data gotten from load_data. Pass in the one for the training, validation, or test data.
batchsize:
Batch size.
training:
True if training the model, false if just evaluating the predictions.
'''
sumloss = numloss = 0
y = np.zeros((3, batchsize), np.float)
mask = np.zeros(batchsize, np.uint8)
input_vector = np.zeros((batchsize, FLAGS.maxlength), np.int32)
total_loss = 0.0
predictions = []
# passing the data through the network
for (FLAGS.target, world, sents) in minibatches:
if len(sents) != batchsize:
print("Bad length, error")
# array to store the length of each sentence in the batch
sequenceSizeLength = []
for k in range(len(sents)):
sequenceSizeLength.append(len(sents[k]))
input_vector[:] = 0
for j in range(len(sents)):
s = sents[j]
for i in range(len(s)):
input_vector[j, i] = s[i]
y[:] = 0
y += FLAGS.target
# create the feed dict with the input data
feed_dict = {input_data: input_vector, lengths: sequenceSizeLength, world_placeholder: world, labels: y, dropout_prob_placeholder: 0.5}
# no dropout if not training
if training == False:
feed_dict = {input_data: input_vector, lengths: sequenceSizeLength, world_placeholder: world, labels: y}
if training == True:
_, current_loss = sess.run([optimize, loss], feed_dict=feed_dict)
total_loss += current_loss
else:
# do the evaluation
# getting all the coordinate outputs individually is good for evaluation and also allows us to easily compute the mean/median of the entire train/test set
resOutput1, resOutput2, resOutput3, resOutput4, resOutput5, resOutput6, resOutput7, resOutput8, resOutput9, made_prediction, resulting_values = sess.run([output_1, output_2, output_3, output_4, output_5, output_6, output_7, output_8, output_9, correct_prediction, result], feed_dict=feed_dict)
resOutputTotal = [resOutput1, resOutput2, resOutput3, resOutput4, resOutput5, resOutput6, resOutput7, resOutput8, resOutput9]
predictions.extend(resOutputTotal)
if training == True:
return total_loss
else:
return predictions
best_train_average = sys.maxsize
best_test_average = sys.maxsize
best_validation_average = sys.maxsize
# do the training and evaluation
for epoch in range (FLAGS.epochs):
start_time = time.time()
# train if we are not in an analysis stage
if FLAGS.performing_analysis == False:
trainLoss = train_test_model(session, all_data[0], FLAGS.batch_size)
print('Epoch %d: %f' % (epoch, trainLoss))
# train dataset predictions
predictions = train_test_model(session, all_data[0], FLAGS.batch_size, training=False)
    average0 = np.mean(predictions)
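    # Assumed continuation (the original loop is cut off here): evaluate the dev and
    # test splits the same way and keep the checkpoint with the best dev average.
    dev_predictions = train_test_model(session, all_data[1], FLAGS.batch_size, training=False)
    test_predictions = train_test_model(session, all_data[2], FLAGS.batch_size, training=False)
    average1 = np.mean(dev_predictions)
    average2 = np.mean(test_predictions)
    print('train/dev/test mean block distance: %f / %f / %f' % (average0, average1, average2))
    if average1 < best_validation_average:
        best_validation_average = average1
        best_train_average = average0
        best_test_average = average2
        if FLAGS.performing_analysis == False:
            saver.save(session, FLAGS.model_save_path)
    print('epoch time: %f seconds' % (time.time() - start_time))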
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import warnings
import numpy as np
from ... import units as u
from ...time import Time
from ...utils import iers
from ...utils.exceptions import AstropyWarning
from ..representation import CartesianRepresentation
# The UTC time scale is not properly defined prior to 1960, so Time('B1950',
# scale='utc') will emit a warning. Instead, we use Time('B1950', scale='tai')
# which is equivalent, but does not emit a warning.
EQUINOX_J2000 = Time('J2000', scale='utc')
EQUINOX_B1950 = Time('B1950', scale='tai')
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time('J2000', scale='utc')
PIOVER2 = np.pi / 2.
#comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29)*u.arcsec
_IERS_HINT = """
If you need enough precision such that this matters (~<10 arcsec), you can
use the latest IERS predictions by running:
>>> from astropy.utils import iers
>>> iers.IERS.iers_table = iers.IERS_A.open(iers.IERS_A_URL)
"""
def cartrepr_from_matmul(pmat, coo, transpose=False):
"""
Note that pmat should be an ndarray, *not* a matrix.
"""
if pmat.shape[-2:] != (3, 3):
raise ValueError("tried to do matrix multiplication with an array that "
"doesn't end in 3x3")
if coo.isscalar:
# a simpler path for scalar coordinates
if transpose:
pmat = pmat.T
newxyz = np.sum(pmat * coo.cartesian.xyz, axis=-1)
else:
xyz = coo.cartesian.xyz.T
# these expression are the same as iterating over the first dimension of
# pmat and xyz and doing matrix multiplication on each in turn. resulting
# dimension is <coo shape> x 3
pmat = pmat.reshape(pmat.size//9, 3, 3)
if transpose:
pmat = pmat.transpose(0, 2, 1)
newxyz = np.sum(pmat * xyz.reshape(xyz.size//3, 1, 3), axis=-1).T
return CartesianRepresentation(newxyz)
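# Equivalent formulation (illustrative only, not used below): the broadcasting sum
# above performs the same contraction as a single einsum over the stacked 3x3
# matrices. `_cartrepr_from_matmul_einsum` is a hypothetical helper taking xyz of
# shape (n, 3).
def _cartrepr_from_matmul_einsum(pmat, xyz, transpose=False):
    pmat = pmat.reshape(-1, 3, 3)
    if transpose:
        pmat = pmat.transpose(0, 2, 1)
    return np.einsum('nij,nj->ni', pmat, xyz.reshape(-1, 3)).T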
def get_polar_motion(time):
"""
gets the two polar motion components in radians for use with apio13
"""
#get the polar motion from the IERS table
xp, yp, status = iers.IERS.open().pm_xy(time, return_status=True)
wmsg = None
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
wmsg = ('Tried to get polar motions for times before IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those.')
xp.ravel()[status.ravel()==iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel()==iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
wmsg = ('Tried to get polar motions for times after IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those.' + _IERS_HINT)
xp.ravel()[status.ravel()==iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel()==iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
return xp.to(u.radian).value, yp.to(u.radian).value
def _warn_iers(ierserr):
"""
Generate a warning for an IERSRangeerror
Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
"""
msg = '{0} Assuming UT1-UTC=0 for coordinate transformations.{1}'
warnings.warn(msg.format(ierserr.args[0], _IERS_HINT), AstropyWarning)
def get_dut1utc(time):
"""
This function is used to get UT1-UTC in coordinates because normally it
gives an error outside the IERS range, but in coordinates we want to allow
it to go through but with a warning.
"""
try:
return time.delta_ut1_utc
except iers.IERSRangeError as e:
_warn_iers(e)
        return np.zeros(time.shape)
import nnabla as nn
import numpy as np
from .identity import IdentityConverter
from .helpers import GraphInfo
class BatchNormalizationLinearConverter(IdentityConverter):
"""
The parameters of the batch normalization replaced simple scale and bias.
Args:
black_list (list): Black list of the function list.
params (:obj:`OrderedDict`): Result of nn.get_parameters().
name (:obj:`str`): Prefix of the parameter scope.
"""
def __init__(self,
black_list=[], params=None,
name="bn-linear"):
super(BatchNormalizationLinearConverter, self).__init__(black_list,
params, name)
def convert(self, vroot, entry_variables):
"""
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func.name == "BatchNormalization":
bn_func = func
# TODO: should deal with both?
if bn_func.info.args["batch_stat"] == False:
o = self._bn_linear_conversion(bn_func, cnt)
cnt += 1
continue
# Identity conversion
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable
def _bn_linear_conversion(self, bn_func, cnt):
# Conversion
eps_data = bn_func.info.args["eps"]
        beta_data = np.squeeze(bn_func.inputs[1].d)
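# Illustrative sketch (assumed; the converter's own code is cut off above): with
# batch_stat=False a batch-normalization layer collapses to the affine map
# y = scale * x + bias, using the standard folding of its parameters shown below.
def fold_batch_norm(gamma, beta, mean, var, eps):
    scale = gamma / np.sqrt(var + eps)
    bias = beta - mean * scale
    return scale, bias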
# -*- coding: utf-8 -*-
"""
@author: <NAME>, GEUS (Geological Survey of Denmark and Greenland)
"""
import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling
import numpy as np
from osgeo import gdal, ogr, gdalconst
import geopandas as gpd
import matplotlib.pyplot as plt
# %%
# load ESA land classes
input_file = 'C:/Users/Pascal/Desktop/UZH_2020/ArcticHackathon/analyses/'\
+ 'ESA_land_classes/Svalbard.tif'
esalc_reader = rasterio.open(input_file)
esalc = esalc_reader.read(1)
# save metadata to create a file for glaciers
meta = esalc_reader.meta
#%%
# create mask for "permanent snow and ice"
glaciers = esalc == 220
# load ESA land classes
output_file = 'C:/Users/Pascal/Desktop/UZH_2020/ArcticHackathon/analyses/'\
+ 'ESA_land_classes/Svalbard_glaciers.tif'
with rasterio.open(output_file, 'w+', **meta) as dst:
dst.write(glaciers.astype('uint8'), 1)
# %%
# reproject Svalbard glaciers to WGS84
output_file_reproj = 'C:/Users/Pascal/Desktop/UZH_2020/ArcticHackathon/analyses/'\
+ 'ESA_land_classes/Svalbard_glaciers_wgs84.tif'
dst_crs = {'init': 'EPSG:4326'}
with rasterio.open(output_file) as src:
transform, width, height = calculate_default_transform(src.crs, dst_crs,
src.width,
src.height,
*src.bounds)
kwargs = src.meta.copy()
kwargs.update({'crs': dst_crs,'transform': transform, 'width': width,'height': height})
with rasterio.open(output_file_reproj, 'w', **kwargs) as dst:
reproject(source=rasterio.band(src, 1),destination=rasterio.band(dst, 1),
src_transform=src.transform,
src_crs=src.crs,
dst_transform=transform,
dst_crs=dst_crs,
resampling=Resampling.nearest)
# %%
# reproject Svalbard glaciers to WGS84
output_file_shp = 'C:/Users/Pascal/Desktop/UZH_2020/ArcticHackathon/analyses/'\
+ 'ESA_land_classes/Svalbard_glaciers_wgs84'
# create shapefile out of raster
def raster_polygonize(mask_temp_mp, shp_temp_mp):
src_ds = gdal.Open(mask_temp_mp)
srcband = src_ds.GetRasterBand(1)
dst_layername = shp_temp_mp
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource( dst_layername + ".shp" )
dst_layer = dst_ds.CreateLayer(dst_layername, srs = None )
fd = ogr.FieldDefn("DN", ogr.OFTInteger)
dst_layer.CreateField(fd)
dst_field = dst_layer.GetLayerDefn().GetFieldIndex("DN")
gdal.Polygonize(srcband, None, dst_layer, 0, [], callback=None)
del src_ds, dst_ds, dst_layer, dst_field
return dst_layername
raster_polygonize(output_file_reproj, output_file_shp)
# %%
output_file_shp_postproc = 'C:/Users/Pascal/Desktop/UZH_2020/ArcticHackathon/analyses/'\
+ 'ESA_land_classes/Svalbard_glaciers_wgs84_postproc.shp'
# post-processing on glacier polygons
shapefile = gpd.read_file(output_file_shp + ".shp")
shapefile['area'] = shapefile.area
# get rid of contour polygons
[shapefile.drop(shapefile['area'].idxmax(), inplace=True) for i in range(4)]
# shapefile_sorted_m = shapefile.to_crs("EPSG:32633")
shapefile_sorted = shapefile.sort_values(by='area')
# shapefile_sorted_processed = shapefile_sorted[shapefile_sorted.area > 0.5]
centroids = shapefile.centroid.reset_index(drop=True)
cx = [centroids[i].xy[0][0] for i in range(0, len(centroids))]
cy = [centroids[i].xy[1][0] for i in range(0, len(centroids))]
glaciers_wgs84_reader = rasterio.open(output_file_reproj)
glaciers_wgs84 = glaciers_wgs84_reader.read(1)
to_keep = []
ax = plt.subplot(111)
for i in range(0, len(cx)):
row, col = glaciers_wgs84_reader.index(cx[i], cy[i])
plt.scatter(cx[i], cy[i])
cell = glaciers_wgs84[row, col]
if cell == 0:
to_keep.append(True)
else:
to_keep.append(False)
shapefile_sorted['ice'] = to_keep
shapefile_sorted.boundary.plot(color='black', ax=ax)
shapefile_sorted[shapefile_sorted.ice == 1].boundary.plot(color='red', ax=ax)
# shapefile_sorted_degrees = shapefile_sorted.to_crs('EPSG:4326')
# ax = plt.subplot(111)
# shapefile_sorted.boundary.plot(color='black', ax=ax)
# shapefile_sorted_processed.boundary.plot(color='red', ax=ax)
# %%
shapefile_sorted_processed.to_file(output_file_shp_postproc)
# %%
from rasterio.mask import mask
from shapely.geometry import mapping
# extract the geometry in GeoJSON format
geoms = shapefile.geometry.values # list of shapely geometries
for i in range(0, len(geoms)):
geoms2 = [mapping(geoms[i])]
    # extract the raster values within the polygon
with rasterio.open(output_file_reproj) as src:
out_image, out_transform = mask(src, geoms2, crop=True)
print(np.shape(out_image))
vals = out_image.flatten()
        if np.sum(vals == 0):
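            # Assumed completion (the original loop body is cut off here): simply
            # report polygons that still contain background (value 0) pixels.
            print('polygon %d contains %d zero-valued pixels' % (i, int(np.sum(vals == 0))))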
"""
This module contains all the functions needed for extracting satellite-derived
shorelines (SDS)
Author: <NAME>, Water Research Laboratory, University of New South Wales
"""
# load modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pdb
# image processing modules
import skimage.filters as filters
import skimage.measure as measure
import skimage.morphology as morphology
# machine learning modules
import sklearn
if sklearn.__version__[:4] == '0.20':
from sklearn.externals import joblib
else:
import joblib
from shapely.geometry import LineString
# other modules
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.cm as cm
from matplotlib import gridspec
import pickle
from datetime import datetime
from pylab import ginput
# CoastSat modules
from coastsat import SDS_tools, SDS_preprocess
np.seterr(all='ignore') # raise/ignore divisions by 0 and nans
# Main function for batch shoreline detection
def extract_shorelines(metadata, settings):
"""
Main function to extract shorelines from satellite images
KV WRL 2018
Arguments:
-----------
metadata: dict
contains all the information about the satellite images that were downloaded
settings: dict with the following keys
'inputs': dict
input parameters (sitename, filepath, polygon, dates, sat_list)
'cloud_thresh': float
value between 0 and 1 indicating the maximum cloud fraction in
the cropped image that is accepted
'cloud_mask_issue': boolean
True if there is an issue with the cloud mask and sand pixels
are erroneously being masked on the images
'buffer_size': int
size of the buffer (m) around the sandy pixels over which the pixels
are considered in the thresholding algorithm
'min_beach_area': int
minimum allowable object area (in metres^2) for the class 'sand',
the area is converted to number of connected pixels
'min_length_sl': int
minimum length (in metres) of shoreline contour to be valid
'sand_color': str
            'default', 'dark' (for grey/black sand beaches) or 'bright' (for white sand beaches)
'output_epsg': int
output spatial reference system as EPSG code
'check_detection': bool
if True, lets user manually accept/reject the mapped shorelines
'save_figure': bool
if True, saves a -jpg file for each mapped shoreline
'adjust_detection': bool
if True, allows user to manually adjust the detected shoreline
Returns:
-----------
output: dict
contains the extracted shorelines and corresponding dates + metadata
"""
sitename = settings['inputs']['sitename']
filepath_data = settings['inputs']['filepath']
filepath_models = os.path.join(os.getcwd(), 'classification', 'models')
# initialise output structure
output = dict([])
# create a subfolder to store the .jpg images showing the detection
filepath_jpg = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
if not os.path.exists(filepath_jpg):
os.makedirs(filepath_jpg)
# close all open figures
plt.close('all')
print('Mapping shorelines:')
# loop through satellite list
for satname in metadata.keys():
# get images
filepath = SDS_tools.get_filepath(settings['inputs'],satname)
filenames = metadata[satname]['filenames']
# initialise the output variables
output_timestamp = [] # datetime at which the image was acquired (UTC time)
output_shoreline = [] # vector of shoreline points
output_filename = [] # filename of the images from which the shorelines where derived
output_cloudcover = [] # cloud cover of the images
output_geoaccuracy = []# georeferencing accuracy of the images
output_idxkeep = [] # index that were kept during the analysis (cloudy images are skipped)
output_t_mndwi = [] # MNDWI threshold used to map the shoreline
        # load classifiers (if sklearn version above 0.20, load the new files)
str_new = ''
if not sklearn.__version__[:4] == '0.20':
str_new = '_new'
if satname in ['L5','L7','L8']:
pixel_size = 15
if settings['sand_color'] == 'dark':
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat_dark%s.pkl'%str_new))
elif settings['sand_color'] == 'bright':
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat_bright%s.pkl'%str_new))
else:
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat%s.pkl'%str_new))
elif satname == 'S2':
pixel_size = 10
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_S2%s.pkl'%str_new))
# convert settings['min_beach_area'] and settings['buffer_size'] from metres to pixels
buffer_size_pixels = np.ceil(settings['buffer_size']/pixel_size)
min_beach_area_pixels = np.ceil(settings['min_beach_area']/pixel_size**2)
# loop through the images
for i in range(len(filenames)):
print('\r%s: %d%%' % (satname,int(((i+1)/len(filenames))*100)), end='')
# get image filename
fn = SDS_tools.get_filenames(filenames[i],filepath, satname)
# preprocess image (cloud mask + pansharpening/downsampling)
im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = SDS_preprocess.preprocess_single(fn, satname, settings['cloud_mask_issue'])
# get image spatial reference system (epsg code) from metadata dict
image_epsg = metadata[satname]['epsg'][i]
# compute cloud_cover percentage (with no data pixels)
cloud_cover_combined = np.divide(sum(sum(cloud_mask.astype(int))),
(cloud_mask.shape[0]*cloud_mask.shape[1]))
if cloud_cover_combined > 0.99: # if 99% of cloudy pixels in image skip
continue
# remove no data pixels from the cloud mask
# (for example L7 bands of no data should not be accounted for)
cloud_mask_adv = np.logical_xor(cloud_mask, im_nodata)
# compute updated cloud cover percentage (without no data pixels)
cloud_cover = np.divide(sum(sum(cloud_mask_adv.astype(int))),
(sum(sum((~im_nodata).astype(int)))))
# skip image if cloud cover is above user-defined threshold
if cloud_cover > settings['cloud_thresh']:
continue
# calculate a buffer around the reference shoreline (if any has been digitised)
im_ref_buffer = create_shoreline_buffer(cloud_mask.shape, georef, image_epsg,
pixel_size, settings)
# classify image in 4 classes (sand, whitewater, water, other) with NN classifier
im_classif, im_labels = classify_image_NN(im_ms, im_extra, cloud_mask,
min_beach_area_pixels, clf)
# if adjust_detection is True, let the user adjust the detected shoreline
if settings['adjust_detection']:
date = filenames[i][:19]
skip_image, shoreline, t_mndwi = adjust_detection(im_ms, cloud_mask, im_labels,
im_ref_buffer, image_epsg, georef,
settings, date, satname, buffer_size_pixels)
# if the user decides to skip the image, continue and do not save the mapped shoreline
if skip_image:
continue
# otherwise map the contours automatically with one of the two following functions:
# if there are pixels in the 'sand' class --> use find_wl_contours2 (enhanced)
# otherwise use find_wl_contours1 (traditional)
else:
try: # use try/except structure for long runs
if sum(sum(im_labels[:,:,0])) < 10 : # minimum number of sand pixels
# compute MNDWI image (SWIR-G)
im_mndwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# find water contours on MNDWI grayscale image
contours_mwi, t_mndwi = find_wl_contours1(im_mndwi, cloud_mask, im_ref_buffer)
else:
# use classification to refine threshold and extract the sand/water interface
contours_mwi, t_mndwi = find_wl_contours2(im_ms, im_labels, cloud_mask,
buffer_size_pixels, im_ref_buffer)
except:
print('Could not map shoreline for this image: ' + filenames[i])
continue
# process the water contours into a shoreline
shoreline = process_shoreline(contours_mwi, cloud_mask, georef, image_epsg, settings)
# visualise the mapped shorelines, there are two options:
# if settings['check_detection'] = True, shows the detection to the user for accept/reject
# if settings['save_figure'] = True, saves a figure for each mapped shoreline
if settings['check_detection'] or settings['save_figure']:
date = filenames[i][:19]
if not settings['check_detection']:
plt.ioff() # turning interactive plotting off
skip_image = show_detection(im_ms, cloud_mask, im_labels, shoreline,
image_epsg, georef, settings, date, satname)
# if the user decides to skip the image, continue and do not save the mapped shoreline
if skip_image:
continue
# append to output variables
output_timestamp.append(metadata[satname]['dates'][i])
output_shoreline.append(shoreline)
output_filename.append(filenames[i])
output_cloudcover.append(cloud_cover)
output_geoaccuracy.append(metadata[satname]['acc_georef'][i])
output_idxkeep.append(i)
output_t_mndwi.append(t_mndwi)
# create dictionary of output
output[satname] = {
'dates': output_timestamp,
'shorelines': output_shoreline,
'filename': output_filename,
'cloud_cover': output_cloudcover,
'geoaccuracy': output_geoaccuracy,
'idx': output_idxkeep,
'MNDWI_threshold': output_t_mndwi,
}
print('')
# close figure window if still open
if plt.get_fignums():
plt.close()
# change the format to have one list sorted by date with all the shorelines (easier to use)
output = SDS_tools.merge_output(output)
# save output structure as output.pkl
filepath = os.path.join(filepath_data, sitename)
with open(os.path.join(filepath, sitename + '_output.pkl'), 'wb') as f:
pickle.dump(output, f)
return output
###################################################################################################
# IMAGE CLASSIFICATION FUNCTIONS
###################################################################################################
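# Illustrative note (an assumption about SDS_tools.nd_index, which lives in a
# separate module and is only called below): a normalised-difference index of
# two bands b1 and b2 is (b1 - b2) / (b1 + b2), evaluated only where the cloud
# mask is False. A minimal stand-alone sketch of that behaviour:
def _example_nd_index(b1, b2, cloud_mask):
    # b1, b2: 2D float arrays of reflectance; cloud_mask: 2D boolean array
    out = np.full(b1.shape, np.nan)
    valid = ~cloud_mask
    # assumes b1 + b2 is non-zero on the valid (cloud-free) pixels
    out[valid] = (b1[valid] - b2[valid]) / (b1[valid] + b2[valid])
    return out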
def calculate_features(im_ms, cloud_mask, im_bool):
"""
Calculates features on the image that are used for the supervised classification.
The features include spectral normalized-difference indices and standard
deviation of the image for all the bands and indices.
KV WRL 2018
Arguments:
-----------
im_ms: np.array
RGB + downsampled NIR and SWIR
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
im_bool: np.array
2D array of boolean indicating where on the image to calculate the features
Returns:
-----------
features: np.array
matrix containing each feature (columns) calculated for all
the pixels (rows) indicated in im_bool
"""
# add all the multispectral bands
features = np.expand_dims(im_ms[im_bool,0],axis=1)
for k in range(1,im_ms.shape[2]):
feature = np.expand_dims(im_ms[im_bool,k],axis=1)
features = np.append(features, feature, axis=-1)
# NIR-G
im_NIRG = SDS_tools.nd_index(im_ms[:,:,3], im_ms[:,:,1], cloud_mask)
features = np.append(features, np.expand_dims(im_NIRG[im_bool],axis=1), axis=-1)
# SWIR-G
im_SWIRG = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
features = np.append(features, np.expand_dims(im_SWIRG[im_bool],axis=1), axis=-1)
# NIR-R
im_NIRR = SDS_tools.nd_index(im_ms[:,:,3], im_ms[:,:,2], cloud_mask)
features = np.append(features, np.expand_dims(im_NIRR[im_bool],axis=1), axis=-1)
# SWIR-NIR
im_SWIRNIR = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,3], cloud_mask)
features = np.append(features, np.expand_dims(im_SWIRNIR[im_bool],axis=1), axis=-1)
# B-R
im_BR = SDS_tools.nd_index(im_ms[:,:,0], im_ms[:,:,2], cloud_mask)
features = np.append(features, np.expand_dims(im_BR[im_bool],axis=1), axis=-1)
# calculate standard deviation of individual bands
for k in range(im_ms.shape[2]):
im_std = SDS_tools.image_std(im_ms[:,:,k], 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
# calculate standard deviation of the spectral indices
im_std = SDS_tools.image_std(im_NIRG, 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
import numpy
import numpy as np
import scipy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import lal
import lalsimulation
from lal.lal import PC_SI as LAL_PC_SI
import h5py
import warnings
import random
# Calculating the projection of complex vector v on complex vector u
def proj(u, v):
# note: this algorithm assumes the denominator is not zero
return u * numpy.vdot(v,u) / numpy.vdot(u,u)
# Calculating the normalized residual (= a new basis) of a vector vec from known bases
def gram_schmidt(bases, vec):
for i in numpy.arange(0,len(bases)):
vec = vec - proj(bases[i], vec)
return vec/numpy.sqrt(numpy.vdot(vec,vec)) # normalized new basis
# Calculating overlap of two waveforms
def overlap_of_two_waveforms(wf1, wf2):
wf1norm = wf1/numpy.sqrt(numpy.vdot(wf1,wf1)) # normalize the first waveform
wf2norm = wf2/numpy.sqrt(numpy.vdot(wf2,wf2)) # normalize the second waveform
diff = wf1norm - wf2norm
#overlap = 1 - 0.5*(numpy.vdot(diff,diff))
overlap = numpy.real(numpy.vdot(wf1norm, wf2norm))
return overlap
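# Illustrative sketch (not part of the original module): quick sanity checks on
# the helpers above. gram_schmidt always returns a unit-norm vector, and the
# overlap of a waveform with a copy of itself rescaled by a positive real
# factor is 1, because both inputs are normalised first. Seed and sizes below
# are arbitrary assumptions.
def _example_basis_helpers(seed=1, length=64):
    rng = numpy.random.default_rng(seed)
    wf = rng.normal(size=length) + 1j * rng.normal(size=length)
    assert abs(overlap_of_two_waveforms(wf, 3.0 * wf) - 1.0) < 1e-12
    basis0 = wf / numpy.sqrt(numpy.vdot(wf, wf))
    new_vec = rng.normal(size=length) + 1j * rng.normal(size=length)
    new_basis = gram_schmidt(numpy.array([basis0]), new_vec)
    assert abs(numpy.vdot(new_basis, new_basis) - 1.0) < 1e-12
    return new_basis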
def spherical_to_cartesian(sph):
x = sph[0]*numpy.sin(sph[1])*numpy.cos(sph[2])
y = sph[0]*numpy.sin(sph[1])*numpy.sin(sph[2])
z = sph[0]*numpy.cos(sph[1])
car = [x,y,z]
return car
def get_m1m2_from_mcq(mc, q):
m2 = mc * q ** (-0.6) * (1+q)**0.2
m1 = m2 * q
return numpy.array([m1,m2])
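# Worked check (illustrative, not part of the original module): invert the
# chirp-mass/mass-ratio parametrisation and confirm that
# Mc = (m1*m2)**(3/5) / (m1+m2)**(1/5) is recovered (here q = m1/m2 >= 1),
# and that spherical_to_cartesian maps [r, theta=pi/2, phi=0] onto the x-axis.
def _example_coordinate_helpers(mc=1.4, q=1.5):
    m1, m2 = get_m1m2_from_mcq(mc, q)
    mc_back = (m1 * m2) ** 0.6 / (m1 + m2) ** 0.2
    assert abs(mc_back - mc) < 1e-10 and abs(m1 / m2 - q) < 1e-10
    x, y, z = spherical_to_cartesian([1.0, numpy.pi / 2, 0.0])
    assert abs(x - 1.0) < 1e-12 and abs(y) < 1e-12 and abs(z) < 1e-12
    return m1, m2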
def generate_a_waveform(m1, m2, spin1, spin2, ecc, lambda1, lambda2, iota, phiRef, distance, deltaF, f_min, f_max, waveFlags, approximant):
test_mass1 = m1 * lal.lal.MSUN_SI
test_mass2 = m2 * lal.lal.MSUN_SI
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_test, cross_test]=lalsimulation.SimInspiralChooseFDWaveform(test_mass1, test_mass2, spin1[0], spin1[1], spin1[2], spin2[0], spin2[1], spin2[2], distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp = plus_test.data.data
hp_test = hp[int(f_min/deltaF):int(f_max/deltaF)]
return hp_test
def generate_a_waveform_from_mcq(mc, q, spin1, spin2, ecc, lambda1, lambda2, iota, phiRef, distance, deltaF, f_min, f_max, waveFlags, approximant):
m1,m2 = get_m1m2_from_mcq(mc,q)
test_mass1 = m1 * lal.lal.MSUN_SI
test_mass2 = m2 * lal.lal.MSUN_SI
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_test, cross_test]=lalsimulation.SimInspiralChooseFDWaveform(test_mass1, test_mass2, spin1[0], spin1[1], spin1[2], spin2[0], spin2[1], spin2[2], distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp = plus_test.data.data
hp_test = hp[int(f_min/deltaF):int(f_max/deltaF)]
return hp_test
def generate_params_points(npts, nparams, params_low, params_high):
paramspoints = numpy.random.uniform(params_low, params_high, size=(npts,nparams))
paramspoints = paramspoints.round(decimals=6)
return paramspoints
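# Usage sketch (arbitrary bounds, purely illustrative): draw a handful of
# random parameter points and confirm they respect the requested ranges
# (values are rounded to 6 decimals inside generate_params_points, hence the
# small tolerance).
def _example_params_points():
    lows = [0.8, 1.0, 0.0]
    highs = [1.2, 3.0, 0.9]
    pts = generate_params_points(npts=5, nparams=3, params_low=lows, params_high=highs)
    assert pts.shape == (5, 3)
    assert numpy.all(pts >= numpy.array(lows) - 1e-6)
    assert numpy.all(pts <= numpy.array(highs) + 1e-6)
    return pts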
# now generating N=npts waveforms at points that are
# randomly uniformly distributed in parameter space
# and calculate their inner products with the 1st waveform
# so as to find the best waveform as the new basis
def least_match_waveform_unnormalized(paramspoints, known_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant):
overlaps = numpy.zeros(npts)
modula = numpy.zeros(npts)
for i in numpy.arange(0,len(paramspoints)):
paramspoint = paramspoints[i]
m1, m2 = get_m1m2_from_mcq(paramspoint[0],paramspoint[1])
s1x, s1y, s1z = spherical_to_cartesian(paramspoint[2:5])
s2x, s2y, s2z = spherical_to_cartesian(paramspoint[5:8])
iota = paramspoint[8]
phiRef = paramspoint[9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoint[10]
if len(paramspoint)==12:
lambda1 = paramspoint[10]
lambda2 = paramspoint[11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
f_ref = 0
RA=0
DEC=0
psi=0
phi=0
m1 *= lal.lal.MSUN_SI
m2 *= lal.lal.MSUN_SI
[plus,cross]=lalsimulation.SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z, s2x, s2y, s2z, distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, f_ref, waveFlags, approximant)
hp_tmp = plus.data.data[int(f_min/deltaF):int(f_max/deltaF)] # hp_tmp is hplus and is a complex vector
residual = hp_tmp
for k in numpy.arange(0,len(known_bases)):
residual -= proj(known_bases[k],hp_tmp)
modula[i] = numpy.sqrt(numpy.vdot(residual, residual))
arg_newbasis = numpy.argmax(modula)
mass1, mass2 = get_m1m2_from_mcq(paramspoints[arg_newbasis][0],paramspoints[arg_newbasis][1])
mass1 *= lal.lal.MSUN_SI
mass2 *= lal.lal.MSUN_SI
sp1x, sp1y, sp1z = spherical_to_cartesian(paramspoints[arg_newbasis,2:5])
sp2x, sp2y, sp2z = spherical_to_cartesian(paramspoints[arg_newbasis,5:8])
inclination = paramspoints[arg_newbasis][8]
phi_ref = paramspoints[arg_newbasis][9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoints[arg_newbasis][10]
if len(paramspoint)==12:
lambda1 = paramspoints[arg_newbasis][10]
lambda2 = paramspoints[arg_newbasis][11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_new, cross_new]=lalsimulation.SimInspiralChooseFDWaveform(mass1, mass2, sp1x, sp1y, sp1z, sp2x, sp2y, sp2z, distance, inclination, phi_ref, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp_new = plus_new.data.data
hp_new = hp_new[int(f_min/deltaF):int(f_max/deltaF)]
basis_new = gram_schmidt(known_bases, hp_new)
return numpy.array([basis_new, paramspoints[arg_newbasis], modula[arg_newbasis]]) # elements, masses&spins, residual mod
def least_match_quadratic_waveform_unnormalized(paramspoints, known_quad_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant):
overlaps = numpy.zeros(npts)
modula = numpy.zeros(npts)
for i in numpy.arange(0,len(paramspoints)):
paramspoint = paramspoints[i]
m1, m2 = get_m1m2_from_mcq(paramspoint[0],paramspoint[1])
s1x, s1y, s1z = spherical_to_cartesian(paramspoint[2:5])
s2x, s2y, s2z = spherical_to_cartesian(paramspoint[5:8])
iota=paramspoint[8]
phiRef=paramspoint[9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoint[10]
if len(paramspoint)==12:
lambda1 = paramspoint[10]
lambda2 = paramspoint[11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
f_ref = 0
RA=0
DEC=0
psi=0
phi=0
m1 *= lal.lal.MSUN_SI
m2 *= lal.lal.MSUN_SI
[plus,cross]=lalsimulation.SimInspiralChooseFDWaveform(m1, m2, s1x, s1y, s1z, s2x, s2y, s2z, distance, iota, phiRef, 0, ecc, 0, deltaF, f_min, f_max, f_ref, waveFlags, approximant)
hp_tmp = plus.data.data[int(f_min/deltaF):int(f_max/deltaF)] # hp_tmp is hplus and is a complex vector
hp_quad_tmp = (numpy.absolute(hp_tmp))**2
residual = hp_quad_tmp
for k in numpy.arange(0,len(known_quad_bases)):
residual -= proj(known_quad_bases[k],hp_quad_tmp)
modula[i] = numpy.sqrt(numpy.vdot(residual, residual))
arg_newbasis = numpy.argmax(modula)
mass1, mass2 = get_m1m2_from_mcq(paramspoints[arg_newbasis][0],paramspoints[arg_newbasis][1])
mass1 *= lal.lal.MSUN_SI
mass2 *= lal.lal.MSUN_SI
sp1x, sp1y, sp1z = spherical_to_cartesian(paramspoints[arg_newbasis,2:5])
sp2x, sp2y, sp2z = spherical_to_cartesian(paramspoints[arg_newbasis,5:8])
inclination = paramspoints[arg_newbasis][8]
phi_ref = paramspoints[arg_newbasis][9]
ecc = 0
if len(paramspoint)==11:
ecc = paramspoints[arg_newbasis][10]
if len(paramspoint)==12:
lambda1 = paramspoints[arg_newbasis][10]
lambda2 = paramspoints[arg_newbasis][11]
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(waveFlags, lambda1)
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(waveFlags, lambda2)
[plus_new, cross_new]=lalsimulation.SimInspiralChooseFDWaveform(mass1, mass2, sp1x, sp1y, sp1z, sp2x, sp2y, sp2z, distance, inclination, phi_ref, 0, ecc, 0, deltaF, f_min, f_max, 0, waveFlags, approximant)
hp_new = plus_new.data.data
hp_new = hp_new[int(f_min/deltaF):int(f_max/deltaF)]
hp_quad_new = (numpy.absolute(hp_new))**2
basis_quad_new = gram_schmidt(known_quad_bases, hp_quad_new)
return numpy.array([basis_quad_new, paramspoints[arg_newbasis], modula[arg_newbasis]]) # elements, masses&spins, residual mod
def bases_searching_results_unnormalized(npts, nparams, nbases, known_bases, basis_waveforms, params, residual_modula, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
if nparams == 10: print("The parameters are Mc, q, s1(mag, theta, phi), s2(mag, theta, phi), iota, and phiRef\n")
if nparams == 11: print("The parameters are Mc, q, s1(mag, theta, phi), s2(mag, theta, phi), iota, phiRef, and eccentricity\n")
if nparams == 12: print("The parameters are Mc, q, s1(mag, theta, phi), s2(mag, theta, phi), iota, phiRef, lambda1, and lambda2\n")
for k in numpy.arange(0,nbases-1):
params_points = generate_params_points(npts, nparams, params_low, params_high)
basis_new, params_new, rm_new= least_match_waveform_unnormalized(params_points, known_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant)
print("Linear Iter: ", k, params_new)
known_bases= numpy.append(known_bases, numpy.array([basis_new]), axis=0)
params = numpy.append(params, numpy.array([params_new]), axis = 0)
residual_modula = numpy.append(residual_modula, rm_new)
return known_bases, params, residual_modula
def bases_searching_quadratic_results_unnormalized(npts, nparams, nbases_quad, known_quad_bases, basis_waveforms, params_quad, residual_modula, params_low, params_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
for k in numpy.arange(0,nbases_quad-1):
print("Quadratic Iter: ", k)
params_points = generate_params_points(npts, nparams, params_low, params_high)
basis_new, params_new, rm_new= least_match_quadratic_waveform_unnormalized(params_points, known_quad_bases, npts, distance, deltaF, f_min, f_max, waveFlags, approximant)
known_quad_bases= numpy.append(known_quad_bases, numpy.array([basis_new]), axis=0)
params_quad = numpy.append(params_quad, numpy.array([params_new]), axis = 0)
residual_modula = numpy.append(residual_modula, rm_new)
return known_quad_bases, params_quad, residual_modula
def massrange(mc_low, mc_high, q_low, q_high):
mmin = get_m1m2_from_mcq(mc_low,q_high)[1]
mmax = get_m1m2_from_mcq(mc_high,q_high)[0]
return [mmin, mmax]
def initial_basis(mc_low, mc_high, q_low, q_high, s1sphere_low, s1sphere_high, s2sphere_low, s2sphere_high, ecc_low, ecc_high, lambda1_low, lambda1_high, lambda2_low, lambda2_high, iota_low, iota_high, phiref_low, phiref_high, distance, deltaF, f_min, f_max, waveFlags, approximant):
try:
if approximant==lalsimulation.IMRPhenomPv2:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomPv3:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomPv3HM:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomXHM:
nparams = 10
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.TaylorF2Ecc:
nparams = 11
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], iota_low, phiref_low, ecc_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], iota_high, phiref_high, ecc_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], 0.33333*np.pi, 1.5*np.pi, ecc_low]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), ecc_low, 0, 0, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomPv2_NRTidal:
nparams = 12
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], lambda1_high, lambda2_high, iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, lambda1_low, lambda2_low, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
try:
if approximant==lalsimulation.IMRPhenomNSBH:
nparams = 12
params_low = [mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, iota_low, phiref_low]
params_high = [mc_high, q_high, s1sphere_high[0], s1sphere_high[1], s1sphere_high[2], s2sphere_high[0], s2sphere_high[1], s2sphere_high[2], lambda1_high, lambda2_high, iota_high, phiref_high]
params_start = numpy.array([[mc_low, q_low, s1sphere_low[0], s1sphere_low[1], s1sphere_low[2], s2sphere_low[0], s2sphere_low[1], s2sphere_low[2], lambda1_low, lambda2_low, 0.33333*np.pi, 1.5*np.pi]])
hp1 = generate_a_waveform_from_mcq(mc_low, q_low, spherical_to_cartesian(s1sphere_low), spherical_to_cartesian(s2sphere_low), 0, lambda1_low, lambda2_low, iota_low, phiref_low, distance, deltaF, f_min, f_max, waveFlags, approximant)
except AttributeError:
pass
return numpy.array([nparams, params_low, params_high, params_start, hp1])
def empnodes(ndim, known_bases): # Here known_bases is the full copy known_bases_copy. Its length is equal to or longer than ndim.
emp_nodes = numpy.arange(0,ndim)*100000000
emp_nodes[0] = numpy.argmax(numpy.absolute(known_bases[0]))
c1 = known_bases[1,emp_nodes[0]]/known_bases[0,1]
interp1 = numpy.multiply(c1,known_bases[0])
diff1 = interp1 - known_bases[1]
r1 = numpy.absolute(diff1)
emp_nodes[1] = numpy.argmax(r1)
for k in numpy.arange(2,ndim):
emp_tmp = emp_nodes[0:k]
Vtmp = numpy.transpose(known_bases[0:k,emp_tmp])
inverse_Vtmp = numpy.linalg.pinv(Vtmp)
e_to_interp = known_bases[k]
Ci = numpy.dot(inverse_Vtmp, e_to_interp[emp_tmp])
interpolantA = numpy.zeros(len(known_bases[k]))+numpy.zeros(len(known_bases[k]))*1j
for j in numpy.arange(0, k):
tmp = numpy.multiply(Ci[j], known_bases[j])
interpolantA += tmp
diff = interpolantA - known_bases[k]
r = numpy.absolute(diff)
emp_nodes[k] = numpy.argmax(r)
emp_nodes = sorted(emp_nodes)
u, c = numpy.unique(emp_nodes, return_counts=True)
dup = u[c > 1]
#print(len(emp_nodes), "\nDuplicates indices:", dup)
emp_nodes = numpy.unique(emp_nodes)
ndim = len(emp_nodes)
#print(len(emp_nodes), "\n", emp_nodes)
V = numpy.transpose(known_bases[0:ndim, emp_nodes])
inverse_V = numpy.linalg.pinv(V)
return numpy.array([ndim, inverse_V, emp_nodes])
def surroerror(ndim, inverse_V, emp_nodes, known_bases, test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant):
hp_test = generate_a_waveform_from_mcq(test_mc, test_q, test_s1, test_s2, test_ecc, test_lambda1, test_lambda2, test_iota, test_phiref, distance, deltaF, f_min, f_max, waveFlags, approximant)
Ci = numpy.dot(inverse_V, hp_test[emp_nodes])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 17:19:24 2021
@author: tungdang
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 14:16:33 2021
@author: tungbioinfo
"""
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import math
from scipy.special import factorial
import numpy as np
import numpy.ma as ma
import pandas as pd
from scipy.special import betaln, digamma, gammaln, logsumexp, polygamma
from scipy import linalg
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import _deprecate_positional_args, check_is_fitted
from sklearn import cluster
from sklearn.utils.extmath import row_norms
#------------------------------------------------------------------------------
# Check gamma + delta update
#------------------------------------------------------------------------------
# NOTE: X (data matrix), resp (responsibilities) and select must already be
# defined in the session before running this check.
n_components = 5
n_samples, n_features = X.shape
gamma_vi = np.ones((n_components, n_features))
delta_vi = np.ones((n_components, n_features))
nk = np.dot(resp.T, select) + 10 * np.finfo(resp.dtype).eps
gamma = np.ones((n_components, n_features))
delta = np.ones((n_components, n_features))
import os
import re
import numpy as np
def get_subject_files(dataset, files, sid):
"""Get a list of files storing each subject data."""
# Pattern of the subject files from different datasets
if "mass" in dataset:
reg_exp = f".*-00{str(sid+1).zfill(2)} PSG.npz"
# reg_exp = "SS3_00{}\.npz$".format(str(sid+1).zfill(2))
elif "sleepedf" in dataset:
reg_exp = f"S[C|T][4|7]{str(sid).zfill(2)}[a-zA-Z0-9]+\.npz$"
# reg_exp = "[a-zA-Z0-9]*{}[1-9]E0\.npz$".format(str(sid).zfill(2))
elif "isruc" in dataset:
reg_exp = f"subject{sid}.npz"
elif "ucddb" in dataset:
reg_exp = f"ucddb{str(sid).zfill(3)}.npz"
else:
raise Exception("Invalid datasets.")
# Get the subject files based on ID
subject_files = []
for i, f in enumerate(files):
pattern = re.compile(reg_exp)
if pattern.search(f):
subject_files.append(f)
#print(len(subject_files))
return subject_files
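# Usage sketch (illustrative file names, not real data): pick out the files
# belonging to subject 3 of the Sleep-EDF dataset. The regular expression keyed
# by the dataset name is searched against every candidate file name.
def _example_get_subject_files():
    candidates = ["SC4031E0.npz", "SC4032E0.npz", "SC4041E0.npz", "ST7031J0.npz"]
    # expected: the subject-03 files (SC4031E0, SC4032E0, ST7031J0) are kept
    return get_subject_files(dataset="sleepedf", files=candidates, sid=3)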
def load_data(subject_files):
"""Load data from subject files."""
signals = []
labels = []
for sf in subject_files:
with np.load(sf) as f:
x = f['x']
y = f['y']
# Reshape the data to match the input of the model - conv2d
x = np.squeeze(x)
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 13:45:27 2018
@author: sreimond
"""
import numpy as np
from datetime import datetime
def is_leap( years, cal='auto' ):
"""
The `is_leap` function enables array input.
Documentation see the `_is_leap` function.
"""
years = np.array(years,ndmin=1)
years_count = np.size(years)
ret = np.zeros(years_count,dtype=np.bool_)
for ix in range(years_count):
try:
ret[ix] = _is_leap( years[ix], cal=cal )
except:
ret[ix] = np.nan
return ret
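# Usage sketch (illustrative): the array-based wrappers accept scalars or
# sequences and return numpy arrays of matching length. The expected truth
# values assume the private _is_leap helper (defined elsewhere in this module)
# implements the standard Gregorian leap-year rule for these years.
def _example_is_leap():
    flags = is_leap([1900, 2000, 2023, 2024])
    assert flags.shape == (4,)
    # expected: [False, True, False, True]
    return flags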
def is_julian( y, m, d ):
"""
The `is_julian` function enables array input.
Documentation see the `_is_julian` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.bool_)
for ix in range(years_count):
try:
ret[ix] = _is_julian( years[ix], months[ix], days[ix] )
except:
ret[ix] = np.nan
return ret
def is_gregorian( y, m, d ):
"""
The `is_gregorian` function enables array input.
Documentation see the `_is_gregorian` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.bool_)
for ix in range(years_count):
try:
ret[ix] = _is_gregorian( years[ix], months[ix], days[ix] )
except:
ret[ix] = np.nan
return ret
def ymd2jd( y, m, d, cal='auto' ):
"""
The `ymd2jd` function enables array input.
Documentation see the `_ymd2jd` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.float_)
for ix in range(years_count):
try:
ret[ix] = _ymd2jd( years[ix], months[ix], days[ix], cal=cal )
except:
ret[ix] = np.nan
return ret
def jd2ymd( jd, cal='auto' ):
"""
The `jd2ymd` function enables array input.
Documentation see the `_jd2ymd` function.
"""
jd = np.array(jd,ndmin=1)
jd_count = np.size(jd)
years = np.zeros(jd_count,dtype=np.int_)
months = np.zeros(jd_count,dtype=np.int_)
days = np.zeros(jd_count,dtype=np.float_)
for ix in range(jd_count):
try:
years[ix], months[ix], days[ix] = _jd2ymd( jd[ix], cal=cal )
except:
years[ix], months[ix], days[ix] = np.nan, np.nan, np.nan
return years, months, days
def ymd2mjd( y, m, d, cal='auto' ):
"""
The `ymd2mjd` function enables array input.
Documentation see the `_ymd2mjd` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.float_)
for ix in range(years_count):
try:
ret[ix] = _ymd2mjd( years[ix], months[ix], days[ix], cal=cal )
except:
ret[ix] = np.nan
return ret
def mjd2ymd( mjd, cal='auto' ):
"""
The `mjd2ymd` function enables array input.
Documentation see the `_mjd2ymd` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
years = np.zeros(mjd_count,dtype=np.int_)
months = np.zeros(mjd_count,dtype=np.int_)
days = np.zeros(mjd_count,dtype=np.float_)
for ix in range(mjd_count):
try:
years[ix], months[ix], days[ix] = _mjd2ymd( mjd[ix], cal=cal )
except:
years[ix], months[ix], days[ix] = np.nan, np.nan, np.nan
return years, months, days
def jd2dow( jd ):
"""
The `jd2dow` function enables array input.
Documentation see the `_jd2dow` function.
"""
jd = np.array(jd,ndmin=1)
jd_count = np.size(jd)
days_number = np.zeros(jd_count,dtype=np.int_)
days_name = np.zeros(jd_count,dtype='|S3')
for ix in range(jd_count):
try:
days_number[ix], days_name[ix] = _jd2dow( jd[ix] )
except:
days_number[ix], days_name[ix] = np.nan, 'nan'
return days_number, days_name
def mjd2dow( mjd ):
"""
The `mjd2dow` function enables array input.
Documentation see the `_mjd2dow` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
days_number = np.zeros(mjd_count,dtype=np.int_)
days_name = np.zeros(mjd_count,dtype='|S3')
for ix in range(mjd_count):
try:
days_number[ix], days_name[ix] = _mjd2dow( mjd[ix] )
except:
days_number[ix], days_name[ix] = np.nan, 'nan'
return days_number, days_name
def dhms2day( d, h, m, s ):
"""
The `dhms2day` function enables array input.
Documentation see the `_dhms2day` function.
"""
days = np.array(d,ndmin=1)
hours = np.array(h,ndmin=1)
minutes = np.array(m,ndmin=1)
seconds = np.array(s,ndmin=1)
days_count = np.size(days)
dim_check = ((days_count == np.size(hours))
and (days_count == np.size(minutes))
and (days_count == np.size(seconds)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(days_count,dtype=np.float_)
for ix in range(days_count):
try:
ret[ix] = _dhms2day( days[ix], hours[ix], minutes[ix], seconds[ix] )
except:
ret[ix] = np.nan
return ret
def day2dhms( day ):
"""
The `day2dhms` function enables array input.
Documentation see the `_day2dhms` function.
"""
day = np.array(day,ndmin=1)
day_count = np.size(day)
days = np.zeros(day_count,dtype=np.int_)
hours = np.zeros(day_count,dtype=np.int_)
minutes = np.zeros(day_count,dtype=np.int_)
seconds = np.zeros(day_count,dtype=np.float_)
for ix in range(day_count):
try:
days[ix], hours[ix], minutes[ix], seconds[ix] = _day2dhms( day[ix] )
except:
days[ix], hours[ix], minutes[ix], seconds[ix] = np.nan, np.nan, np.nan, np.nan
return days, hours, minutes, seconds
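# Usage sketch (illustrative): round-trip between a fractional day and its
# day/hour/minute/second representation. The exact behaviour is delegated to
# the private _dhms2day/_day2dhms helpers defined elsewhere in this module.
def _example_day_roundtrip():
    frac = dhms2day(3, 12, 30, 0.0)   # expected ~3.5208333 days
    d, h, m, s = day2dhms(frac)       # expected ~(3, 12, 30, 0.0)
    return frac, (d, h, m, s)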
def ymd2doy( y, m, d ):
"""
The `ymd2doy` function enables array input.
Documentation see the `_ymd2doy` function.
"""
years = np.array(y,ndmin=1)
months = np.array(m,ndmin=1)
days = np.array(d,ndmin=1)
years_count = np.size(years)
dim_check = ((years_count == np.size(months))
and (years_count == np.size(days)))
if not dim_check:
raise ValueError('dimension mismatch')
ret = np.zeros(years_count,dtype=np.int_)
for ix in range(years_count):
try:
ret[ix] = _ymd2doy( years[ix], months[ix], days[ix] )
except:
ret[ix] = np.nan
return ret
def jd2doy( jd ):
"""
The `jd2doy` function enables array input.
Documentation see the `_jd2doy` function.
"""
jd = np.array(jd,ndmin=1)
jd_count = np.size(jd)
doys = np.zeros(jd_count,dtype=np.int_)
for ix in range(jd_count):
try:
doys[ix] = _jd2doy( jd[ix] )
except:
doys[ix] = np.nan
return doys
def mjd2doy( mjd ):
"""
The `mjd2doy` function enables array input.
Documentation see the `_mjd2doy` function.
"""
mjd = np.array(mjd,ndmin=1)
mjd_count = np.size(mjd)
doys = np.zeros(mjd_count,dtype=np.int_)
for ix in range(mjd_count):
try:
doys[ix] = _mjd2doy( mjd[ix] )
except:
doys[ix] = np.nan
return doys
def doy2ymd( y, doy ):
"""
The `doy2ymd` function enables array input.
Documentation see the `_doy2ymd` function.
"""
ys = np.array(y,ndmin=1)
doys = np.array(doy,ndmin=1)
ys_count = np.size(ys)
dim_check = (ys_count == np.size(doys))
if not dim_check:
raise ValueError('dimension mismatch')
years = np.zeros(ys_count,dtype=np.int_)
months = np.zeros(ys_count,dtype=np.int_)
days = np.zeros(ys_count,dtype=np.float_)
for ix in range(ys_count):
try:
years[ix], months[ix], days[ix] = _doy2ymd( ys[ix], doys[ix] )
except:
years[ix], months[ix], days[ix] = np.nan, np.nan, np.nan
return years, months, days
def doy2jd( y, doy ):
"""
The `doy2jd` function enables array input.
Documentation see the `_doy2jd` function.
"""
ys = np.array(y,ndmin=1)
doys = np.array(doy,ndmin=1)
ys_count = np.size(ys)
ret = np.zeros(ys_count,dtype=np.float_)
for ix in range(ys_count):
try:
ret[ix] = _doy2jd( ys[ix], doys[ix] )
except:
ret[ix] = np.nan
return ret
def doy2mjd( y, doy ):
"""
The `doy2mjd` function enables array input.
Documentation see the `_doy2mjd` function.
"""
ys = np.array(y,ndmin=1)
doys = np.array(doy,ndmin=1)
ys_count = np.size(ys)
ret = np.zeros(ys_count,dtype=np.float_)
for ix in range(ys_count):
try:
ret[ix] = _doy2mjd( ys[ix], doys[ix] )
except:
ret[ix] = np.nan
return ret
def mjd2jd( mjd ):
"""
The `mjd2jd` function enables array input.
Documentation see the `_mjd2jd` function.
"""
mjd = np.array(mjd,ndmin=1)
import numpy as np, pandas as pd
import collections, pickle, os
from glob import glob
from collections import OrderedDict
from astropy.io import fits
from astrobase.lcmath import time_bin_magseries_with_errs
from cdips.lcproc.mask_orbit_edges import mask_orbit_start_and_end
from cdips.plotting.vetting_pdf import _given_mag_get_flux
from timmy.paths import DATADIR, RESULTSDIR
from numpy import array as nparr
from scipy.stats import gaussian_kde
def detrend_tessphot(x_obs, y_obs, y_err):
from wotan import flatten
flat_flux, trend_flux = flatten(x_obs, y_obs, method='hspline',
window_length=0.3,
break_tolerance=0.4, return_trend=True)
# flat_flux, trend_flux = flatten(time, flux, method='pspline',
# break_tolerance=0.4, return_trend=True)
# flat_flux, trend_flux = flatten(time, flux, method='biweight',
# window_length=0.3, edge_cutoff=0.5,
# break_tolerance=0.4, return_trend=True,
# cval=2.0)
return flat_flux, trend_flux
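# Usage sketch (synthetic data, purely illustrative; requires the optional
# wotan package): detrend a toy light curve containing a slow sinusoidal
# trend. Window length, period and noise level are arbitrary assumptions.
def _example_detrend():
    x = np.linspace(0.0, 10.0, 2000)                  # days
    trend = 1.0 + 5e-3 * np.sin(2 * np.pi * x / 8.0)  # slow stellar variability
    y = trend + np.random.normal(0.0, 1e-4, x.size)
    y_err = np.full_like(y, 1e-4)
    flat, fitted_trend = detrend_tessphot(x, y, y_err)
    return flat, fitted_trend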
def get_tessphot(provenance, yval):
"""
provenance: 'spoc' or 'cdips'
yval:
spoc: 'SAP_FLUX', 'PDCSAP_FLUX'
cdips: 'PCA1', 'IRM1', etc.
"""
if provenance == 'spoc':
lcpaths = glob(os.path.join(
DATADIR, 'MAST_2020-05-04T1852/TESS/*/*-s_lc.fits'))
assert len(lcpaths) == 2
elif provenance == 'cdips':
lcpaths = glob(os.path.join(
DATADIR, 'MAST_2020-05-04T1852/HLSP/*/*cdips*llc.fits'))
assert len(lcpaths) == 2
else:
raise NotImplementedError
time, flux, flux_err, qual = [], [], [], []
for l in lcpaths:
hdul = fits.open(l)
d = hdul[1].data
if provenance == 'spoc':
time.append(d['TIME'])
_f, _f_err = d[yval], d[yval+'_ERR']
flux.append(_f/np.nanmedian(_f))
flux_err.append(_f_err/np.nanmedian(_f))
qual.append(d['QUALITY'])
elif provenance == 'cdips':
time.append(d['TMID_BJD'] - 2457000)
_f, _f_err = _given_mag_get_flux(d[yval], err_mag=d['IRE'+yval[-1]])
flux.append(_f)
flux_err.append(_f_err)
hdul.close()
time = np.concatenate(time).ravel()
flux = np.concatenate(flux).ravel()
flux_err = np.concatenate(flux_err).ravel()
if len(qual)>0:
qual = np.concatenate(qual).ravel()
return time, flux, flux_err, qual
def get_clean_tessphot(provenance, yval, binsize=None, maskflares=0):
"""
Get data. Mask quality cut.
Optionally bin, to speed fitting (linear in time, but at the end of the
day, you want 2 minute).
"""
time, flux, flux_err, qual = get_tessphot(provenance, yval)
N_i = len(time) # initial
if provenance == 'spoc':
# [ 0, 1, 8, 16, 32, 128, 160, 168, 176, 180, 181,
# 512, 2048, 2080, 2176, 2216, 2560]
binary_repr_vec = np.vectorize(np.binary_repr)
qual_binary = binary_repr_vec(qual, width=12)
# See Table 28 of EXP-TESS-ARC-ICD-TM-0014
# don't want:
# bit 3 coarse point
# bit 4 Earth point
# bit 6 reaction wheel desaturation
# bit 8 manual exclude
# bit 11 cosmic ray detected on collateral pixel row or column
# bit 12 straylight from earth or moon in camera fov
# badbits = [3,4,6,8,11]
badbits = [3,4,6,8,11,12]
sel = np.isfinite(qual)
for bb in badbits:
# note zero vs one-based count here to convert bitwise flags to
# python flags
sel &= ~(np.array([q[bb-1] for q in qual_binary]).astype(bool))
time, flux, flux_err = time[sel], flux[sel], flux_err[sel]
inds = np.argsort(time)
time, flux, flux_err = time[inds], flux[inds], flux_err[inds]
N_ii = len(time) # after quality cut
# finite times, fluxes, flux errors.
sel = np.isfinite(time) & np.isfinite(flux) & np.isfinite(flux_err)
time, flux, flux_err = time[sel], flux[sel], flux_err[sel]
N_iii = len(time)
# time, flux, sel = mask_orbit_start_and_end(time, flux, orbitgap=0.5,
# expected_norbits=2,
# orbitpadding=6/(24),
# raise_expectation_error=True,
# return_inds=True)
# flux_err = flux_err[sel]
# N_iii = len(time) # after orbit edge masking
if maskflares:
t_offset = np.nanmin(time)
FLARETIMES = [
(4.60+t_offset, 4.63+t_offset),
(37.533+t_offset, 37.62+t_offset)
]
flaresel = np.zeros_like(time).astype(bool)
for ft in FLARETIMES:
flaresel |= ( (time > min(ft)) & (time < max(ft)) )
time, flux, flux_err = (
time[~flaresel], flux[~flaresel], flux_err[~flaresel]
)
N_iv = len(time)
x_obs = time
y_obs = (flux / np.nanmedian(flux))
y_err = flux_err / np.nanmedian(flux)
print(42*'-')
print('N initial: {}'.
format(N_i))
print('N after quality cut: {}'.
format(N_ii))
print('N after quality cut + finite masking: {}'.
format(N_iii))
if maskflares:
print('N after quality cut + finite masking + flare masking: {}'.
format(N_iv))
print(42*'-')
if isinstance(binsize, int):
bd = time_bin_magseries_with_errs(
x_obs, y_obs, y_err, binsize=binsize, minbinelems=5
)
x_obs = bd['binnedtimes']
y_obs = bd['binnedmags']
# assume errors scale as sqrt(N)
original_cadence = 120
y_err = bd['binnederrs'] / (binsize/original_cadence)**(1/2)
assert len(x_obs) == len(y_obs) == len(y_err)
return (
x_obs.astype(np.float64),
y_obs.astype(np.float64),
y_err.astype(np.float64)
)
def get_elsauce_phot(datestr=None):
"""
get ground-based photometry from Phil Evans.
2020-04-01: R_c
2020-04-26: R_c
2020-05-21: I_c
2020-06-14: B_j
"""
if datestr is None:
lcglob = os.path.join(RESULTSDIR, 'groundphot', 'externalreduc',
'bestkaren', 'to_fit', '*.dat')
lcpaths = glob(lcglob)
assert len(lcpaths) == 4
else:
lcglob = os.path.join(RESULTSDIR, 'groundphot', 'externalreduc',
'bestkaren', 'to_fit', f'TIC*{datestr}*.dat')
lcpaths = glob(lcglob)
assert len(lcpaths) == 1
time, flux, flux_err = [], [], []
for l in lcpaths:
df = pd.read_csv(l, delim_whitespace=True)
time.append(nparr(df['BJD_TDB']))
if 'rel_flux_T1_dfn' in df:
flux_k = 'rel_flux_T1_dfn'
flux_err_k = 'rel_flux_err_T1_dfn'
else:
flux_k = 'rel_flux_T1_n'
flux_err_k = 'rel_flux_err_T1_n'
flux.append(nparr(df[flux_k]))
flux_err.append(nparr(df[flux_err_k]))
time = np.concatenate(time).ravel()
flux = np.concatenate(flux).ravel()
flux_err = np.concatenate(flux_err).ravel()
return time, flux, flux_err
def get_astep_phot(datestr=None):
"""
get ground-based photometry from ASTEP400
datestrs = ['20200529', '20200614', '20200623']
"""
if datestr is None:
raise NotImplementedError
else:
lcglob = os.path.join(RESULTSDIR, 'groundphot', 'externalreduc',
'ASTEP_to_fit', f'TIC*{datestr}*.csv')
lcpaths = glob(lcglob)
assert len(lcpaths) == 1
time, flux, flux_err = [], [], []
for l in lcpaths:
df = pd.read_csv(l)
time_k = 'BJD'
flux_k = 'FLUX'
flux_err_k = 'ERRFLUX'
time.append(nparr(df[time_k]))
flux.append(nparr(df[flux_k]))
flux_err.append(nparr(df[flux_err_k]))
time = np.concatenate(time).ravel()
flux = np.concatenate(flux).ravel()
flux_err = np.concatenate(flux_err).ravel()
return time, flux, flux_err
def get_clean_rv_data(datestr='20200525'):
# get zero-subtracted RV CSV in m/s units, time-sorted.
rvpath = os.path.join(DATADIR, 'spectra', 'RVs_{}.csv'.format(datestr))
df = pd.read_csv(rvpath)
time = nparr(df['time'])
mnvel = nparr(df['mnvel'])
errvel = nparr(df['errvel'])
telvec = nparr(df['tel'])
source = nparr(df['Source'])
# first, zero-subtract each instrument median. then, set units to be
# m/s, not km/s.
umedians = {}
for uinstr in np.unique(telvec):
umedians[uinstr] = np.nanmedian(mnvel[telvec == uinstr])
mnvel[telvec == uinstr] -= umedians[uinstr]
mnvel *= 1e3
errvel *= 1e3
# time-sort
inds = np.argsort(time)
time = np.ascontiguousarray(time[inds], dtype=float)
"""
Test script for data.py classes.
"""
import os
import cwinpy
import lal
import numpy as np
import pytest
from cwinpy import HeterodynedData, MultiHeterodynedData
from cwinpy.data import PSDwrapper
from lalpulsar.PulsarParametersWrapper import PulsarParametersPy
from matplotlib.figure import Figure
class TestHeterodynedData(object):
"""
Tests for the HeterodynedData and MultiHeterodynedData objects.
"""
def test_no_data(self):
"""
Test exception occurs if passing no data and no time stamps.
"""
# test exception if no data or times are passed
with pytest.raises(ValueError):
HeterodynedData()
def test_broken_data(self):
"""
Test reading of data fails during to a "broken" input file
"""
# create a "broken" input file (note the spurious "H")
brokendata = """\
# times real imaginary
1000000000.0 -2.3e-25 4.3e-26
1000000060.0 H.2e-26 1.2e-25
1000000120.0 -1.7e-25 -2.8e-25
1000000180.0 -7.6e-26 -8.9e-26
"""
brokenfile = "brokendata.txt"
with open(brokenfile, "w") as fp:
fp.write(brokendata)
with pytest.raises(IOError):
HeterodynedData(brokenfile)
# run through MultiHeterodynedData
with pytest.raises(ValueError):
MultiHeterodynedData(brokenfile)
with pytest.raises(IOError):
MultiHeterodynedData({"H1": brokenfile})
os.remove(brokenfile) # clean up file
def test_multi_data(self):
"""
Test various ways of generating data for multiple detectors.
"""
# create four datasets
times1 = np.linspace(1000000000.0, 1000086340.0, 1440)
data1 = np.random.normal(0.0, 1e-25, size=(1440, 2))
detector1 = "H1"
times2 = np.linspace(1000000000.0, 1000086340.0, 1440)
data2 = np.random.normal(0.0, 1e-25, size=(1440, 2))
detector2 = "L1"
times3 = np.linspace(1000000000.0, 1000086340.0, 1440)
data3 = np.random.normal(0.0, 1e-25, size=(1440, 2))
detector3 = "G1"
times4 = np.linspace(1000000000.0, 1000086340.0, 1440)
data4 = np.random.normal(0.0, 1e-25, size=(1440, 2))
detector4 = "K1"
# add first dataset as precreated HeterodynedData object
het1 = HeterodynedData(data1, times=times1, detector=detector1)
mhet = MultiHeterodynedData(het1)
# add second dataset as a dictionary
ddic = {detector2: data2}
tdic = {"XX": times2} # set to fail
# add second dataset
with pytest.raises(KeyError):
mhet.add_data(ddic, tdic, detector2)
# fix tdic
tdic = {detector2: times2}
mhet.add_data(ddic, tdic, detector2)
# add third data set as a dictionary of HeterodynedData
het3 = HeterodynedData(data3, times=times3, detector=detector3)
ddic = {detector3: het3}
mhet.add_data(ddic)
# add fourth data set by just passing the data
tdic = {detector4: times4} # fail with dictionary of times
with pytest.raises(TypeError):
mhet.add_data(data4, tdic, detector4)
# just add with times
mhet.add_data(data4, times4, detector4)
assert len(mhet) == 4
assert len(mhet.detectors) == 4
assert len(mhet.to_list) == 4
# test looping over MultiHeterodynedData
dets = [detector1, detector2, detector3, detector4]
for data, det in zip(mhet, dets):
assert det == data.detector
def test_too_many_columns(self):
"""
Test for failure if there are too many columns in the data file.
"""
# create a "broken" input file with an extra column
brokendata = """\
# times real imaginary std extra
1000000000.0 -2.3e-25 4.3e-26 1e-26 1
1000000060.0 3.2e-26 1.2e-25 1e-26 2
1000000120.0 -1.7e-25 -2.8e-25 1e-26 3
1000000180.0 -7.6e-26 -8.9e-26 1e-26 4
"""
brokenfile = "brokendata.txt"
with open(brokenfile, "w") as fp:
fp.write(brokendata)
with pytest.raises(IOError):
HeterodynedData(brokenfile)
os.remove(brokenfile) # clean up file
def test_too_few_columns(self):
"""
Test for failure if there are too few columns in the data file.
"""
# create a "broken" input file with a missing column
brokendata = """\
# times real
1000000000.0 -2.3e-25
1000000060.0 3.2e-26
1000000120.0 -1.7e-25
1000000180.0 -7.6e-26
"""
brokenfile = "brokendata.txt"
with open(brokenfile, "w") as fp:
fp.write(brokendata)
with pytest.raises(IOError):
HeterodynedData(brokenfile)
os.remove(brokenfile) # clean up file
def test_nonuniform_data(self):
"""
Test that non-uniform data times stamps are correctly retained.
"""
# create four datasets
times = np.linspace(1000000000.0, 1000086340.0, 1440)
# remove some times to create non-uniform sampling
times = np.delete(times, [20, 897, 1200])
data = np.random.normal(0.0, 1e-25, size=(len(times), 2))
detector = "H1"
het = HeterodynedData(data=data, times=times, detector=detector)
assert np.all(times == het.times.value)
assert het.dt.value == np.min(np.diff(times))
def test_read_text_data(self):
"""
Test that a valid input file is read in correctly.
"""
# create a data file to output
hetdata = """\
# times real imaginary
1000000000.0 -2.3e-25 4.3e-26
1000000060.0 3.2e-26 1.2e-25
1000000120.0 -1.7e-25 -2.8e-25
1000000180.0 -7.6e-26 -8.9e-26
"""
datafile = "testdata.txt"
with open("testdata.txt", "w") as fp:
fp.write(hetdata)
het = HeterodynedData(datafile)
assert len(het) == 4
assert (het.data.real[0] == -2.3e-25) and (het.data.real[-1] == -7.6e-26)
assert (het.data.imag[0] == 4.3e-26) and (het.data.imag[-1] == -8.9e-26)
assert (het.times[0].value == 1000000000.0) and (
het.times[-1].value == 1000000180.0
)
assert het.dt.value == 60.0
assert het.sample_rate.value == 1.0 / 60.0
os.remove(datafile) # clean up file
def test_read_text_data_std(self):
"""
Test that a valid file with standard deviations is read in correctly.
"""
# create a data file to output
hetdata = """\
# times real imaginary std
1000000000.0 -2.3e-25 4.3e-26 1.1e-26
1000000060.0 3.2e-26 1.2e-25 2.1e-26
1000000120.0 -1.7e-25 -2.8e-25 1.5e-26
1000000180.0 -7.6e-26 -8.9e-26 1.3e-26
"""
datafile = "testdata.txt"
with open("testdata.txt", "w") as fp:
fp.write(hetdata)
het = HeterodynedData(datafile)
assert len(het) == 4
assert (het.data.real[0] == -2.3e-25) and (het.data.real[-1] == -7.6e-26)
assert (het.data.imag[0] == 4.3e-26) and (het.data.imag[-1] == -8.9e-26)
assert (het.stds[0] == 1.1e-26) and (het.stds[-1] == 1.3e-26)
assert (het.vars[0] == (1.1e-26) ** 2) and (het.vars[-1] == (1.3e-26) ** 2)
assert (het.times[0].value == 1000000000.0) and (
het.times[-1].value == 1000000180.0
)
assert het.dt.value == 60.0
assert het.sample_rate.value == 1.0 / 60.0
os.remove(datafile) # clean up file
def test_write_text_data(self):
"""
Test that data can be correctly written (and re-read) from a text file.
"""
times = np.linspace(1000000000.0, 1000086340.0, 1440)
data = np.random.normal(0.0, 1e-25, size=(1440, 2))
het = HeterodynedData(data, times=times)
for suffix in ["txt", "txt.gz"]:
datafile = "testdata.{}".format(suffix)
het.write(datafile)
# read in data
hetnew = HeterodynedData.read(datafile)
assert np.array_equal(het.data, hetnew.data)
assert np.array_equal(het.times, hetnew.times)
# check things that the read-in data should not contain
assert hetnew.detector is None
assert hetnew.par is None
os.remove(datafile) # clean up file
def test_write_text_data_std(self):
"""
Test that data can be correctly written (and re-read) from a text file
with the standard deviations also output.
"""
times = np.linspace(1000000000.0, 1000086340.0, 1440)
data = np.random.normal(0.0, 1e-25, size=(1440, 2))
stds = 1e-25 * np.ones_like(times)
data = np.column_stack((data, stds))
het = HeterodynedData(data, times=times)
for suffix in ["txt", "txt.gz"]:
datafile = "testdata.{}".format(suffix)
het.write(datafile)
# read in data
hetnew = HeterodynedData.read(datafile)
assert np.array_equal(het.data, hetnew.data)
assert np.array_equal(het.times, hetnew.times)
assert np.array_equal(het.stds, hetnew.stds)
# check things that the read-in data should not contain
assert hetnew.detector is None
assert hetnew.par is None
os.remove(datafile) # clean up file
def test_write_hdf_data(self):
"""
Test that data can be correctly written (and re-read) from a HDF5 file.
"""
times = np.linspace(1000000000.0, 1000086340.0, 1440)
data = np.random.normal(0.0, 1e-25, size=(1440, 2))
det = "H1"
parcontent = """\
PSRJ J0123+3456
RAJ 01:23:45.6789
DECJ 34:56:54.321
F0 567.89
F1 -1.2e-12
PEPOCH 56789
H0 9.87e-26
COSIOTA 0.3
PSI 1.1
PHI0 2.4
"""
parfile = "J0123+3456.par"
# add content to the par file
with open(parfile, "w") as fp:
fp.write(parcontent)
het = HeterodynedData(data, times=times, detector=det, par=parfile)
for suffix in ["hdf5", "hdf", "h5"]:
datafile = "testdata.{}".format(suffix)
het.write(datafile, overwrite=True)
# read in data
hetnew = HeterodynedData.read(datafile)
assert np.array_equal(het.data, hetnew.data)
assert np.array_equal(het.times, hetnew.times)
# check that detector and par file are read in correctly
assert hetnew.detector == det
for key in het.par.as_dict():
if isinstance(hetnew.par[key], str):
assert hetnew.par[key] == het.par[key]
else:
assert np.allclose(hetnew.par[key], het.par[key])
# check version information is stored
assert het.cwinpy_version == hetnew.cwinpy_version
assert het.cwinpy_version == cwinpy.__version__
os.remove(datafile) # clean up file
os.remove(parfile)
def test_write_hdf_data_std(self):
"""
Test that data can be correctly written (and re-read) from a HDF5 file
with the standard deviations also output. Also, add an injection!
"""
times = np.linspace(1000000000.0, 1000086340.0, 1440)
data = np.random.normal(0.0, 1e-25, size=(1440, 2))
stds = 1e-25 * np.ones_like(times)
data = np.column_stack((data, stds))
det = "H1"
parcontent = """\
PSRJ J0123+3456
RAJ 01:23:45.6789
DECJ 34:56:54.321
F0 567.89
F1 -1.2e-12
PEPOCH 56789
H0 9.87e-26
COSIOTA 0.3
PSI 1.1
PHI0 2.4
"""
parfile = "J0123+3456.par"
# add content to the par file
with open(parfile, "w") as fp:
fp.write(parcontent)
het = HeterodynedData(data, times=times, detector=det, par=parfile, inject=True)
for suffix in ["hdf5", "hdf", "h5"]:
datafile = "testdata.{}".format(suffix)
het.write(datafile, overwrite=True)
# read in data
hetnew = HeterodynedData.read(datafile)
assert np.array_equal(het.data, hetnew.data)
assert np.array_equal(het.times, hetnew.times)
assert np.array_equal(het.stds, hetnew.stds)
assert hetnew.injection is True
assert np.array_equal(het.injection_data, hetnew.injection_data)
# check that detector and par file are read in correctly
assert hetnew.detector == det
for key in het.par.as_dict():
if key in hetnew.par.as_dict():
if isinstance(hetnew.par[key], str):
assert hetnew.par[key] == het.par[key]
assert hetnew.injpar[key] == het.injpar[key]
else:
assert np.allclose(hetnew.par[key], het.par[key])
assert np.allclose(hetnew.injpar[key], het.injpar[key])
os.remove(datafile) # clean up file
os.remove(parfile)
def test_zero_data(self):
"""
Test that data containing zeros is produced if only time stamps are
provided.
"""
# create "zero" data by only passing a set of times
times = np.linspace(1000000000.0, 1000086340.0, 1440)
het = HeterodynedData(times=times)
assert len(het) == len(times)
assert np.all(het.data == 0.0)
def test_array_data(self):
"""
Test passing the data as arrays containing times and data.
"""
times = np.linspace(1000000000.0, 1000086340.0, 1440)
data = np.random.normal(0.0, 1e-25, size=(1440, 2))
het = HeterodynedData(data, times=times)
assert np.all(het.times.value == times)
assert np.all(het.data.real == data[:, 0])
assert np.all(het.data.imag == data[:, 1])
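# Usage sketch (illustrative): the minimal construction pattern exercised by
# the tests above, i.e. building a HeterodynedData object from a two-column
# real/imaginary array plus GPS time stamps for a chosen detector.
def _example_heterodyned_data():
    times = np.linspace(1000000000.0, 1000086340.0, 1440)
    data = np.random.normal(0.0, 1e-25, size=(1440, 2))
    het = HeterodynedData(data, times=times, detector="H1")
    return het.dt.value, het.sample_rate.value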
import numpy as np
from gym.envs.toy_text import discrete
import matplotlib.pyplot as plt
import matplotlib.cm as cm
class GridWorldEnv(discrete.DiscreteEnv):
metadata = {"render.modes": ["human", "ansi"]}
def __init__(self, grid, move_prob=0.8, default_reward=0.0):
# grid is 2d-array, and each value treated as attribute.
# attribute is
# 0: ordinary cell
# -1: damage cell (game end)
# 1: reward cell (game end)
self.grid = grid
if isinstance(grid, (list, tuple)):
self.grid = np.array(grid)
self._actions = {
"LEFT": 0,
"DOWN": 1,
"RIGHT": 2,
"UP": 3,
}
self.default_reward = default_reward
self.move_prob = move_prob
num_states = self.nrow * self.ncol
num_actions = len(self._actions)
# start from left down
initial_state_prob = np.zeros(num_states)
initial_state_prob[self.coordinate_to_state(self.nrow - 1, 0)] = 1.0
# Make transitions
P = {}
for s in range(num_states):
if s not in P:
P[s] = {}
reward = self.reward_func(s)
done = self.has_done(s)
if done:
# Terminal state
for a in range(num_actions):
P[s][a] = []
P[s][a].append([1.0, s, reward, done])
else:
for a in range(num_actions):
P[s][a] = []
transition_probs = self.transit_func(s, a)
for n_s in transition_probs:
reward = self.reward_func(n_s)
done = self.has_done(n_s)  # terminal flag belongs to the next state
P[s][a].append([transition_probs[n_s], n_s,
reward, done])
self.P = P
super().__init__(num_states, num_actions, P, initial_state_prob)
@property
def nrow(self):
return self.grid.shape[0]
@property
def ncol(self):
return self.grid.shape[1]
@property
def shape(self):
return self.grid.shape
@property
def actions(self):
return list(range(self.action_space.n))
@property
def states(self):
return list(range(self.observation_space.n))
def state_to_coordinate(self, s):
row, col = divmod(s, self.nrow)
return row, col
def coordinate_to_state(self, row, col):
index = row * self.nrow + col
return index
def state_to_feature(self, s):
feature = np.zeros(self.observation_space.n)
feature[s] = 1.0
return feature
def transit_func(self, state, action):
transition_probs = {}
opposite_direction = (action + 2) % 4
candidates = [a for a in range(len(self._actions))
if a != opposite_direction]
for a in candidates:
prob = 0
if a == action:
prob = self.move_prob
else:
prob = (1 - self.move_prob) / 2
next_state = self._move(state, a)
if next_state not in transition_probs:
transition_probs[next_state] = prob
else:
transition_probs[next_state] += prob
return transition_probs
def reward_func(self, state):
row, col = self.state_to_coordinate(state)
reward = self.grid[row][col]
return reward
def has_done(self, state):
row, col = self.state_to_coordinate(state)
reward = self.grid[row][col]
if np.abs(reward) == 1:
return True
else:
return False
def _move(self, state, action):
next_state = state
row, col = self.state_to_coordinate(state)
next_row, next_col = row, col
# Move state by action
if action == self._actions["LEFT"]:
next_col -= 1
elif action == self._actions["DOWN"]:
next_row += 1
elif action == self._actions["RIGHT"]:
next_col += 1
elif action == self._actions["UP"]:
next_row -= 1
# Check the out of grid
if not (0 <= next_row < self.nrow):
next_row, next_col = row, col
if not (0 <= next_col < self.ncol):
next_row, next_col = row, col
next_state = self.coordinate_to_state(next_row, next_col)
return next_state
def plot_on_grid(self, values):
if len(values.shape) < 2:
values = values.reshape(self.shape)
fig, ax = plt.subplots()
ax.imshow(values, cmap=cm.RdYlGn)
ax.set_xticks(np.arange(self.ncol))
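# Usage sketch (illustrative): build a small grid with one reward cell and one
# damage cell, reset the environment and take a few random steps. A square
# grid is used because the state-index helpers above use nrow for both
# conversions. Relies on the classic (pre-0.26) gym DiscreteEnv step/reset API.
def _example_gridworld():
    grid = [[0, 0, 1],
            [0, -1, 0],
            [0, 0, 0]]
    env = GridWorldEnv(grid, move_prob=0.8)
    state = env.reset()
    for _ in range(5):
        action = env.action_space.sample()
        state, reward, done, _ = env.step(action)
        if done:
            break
    return state, reward, done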
"""
Attribute: data structure for 1-dimensional cross-sectional data
"""
import numpy as np
from bisect import bisect_right
from random import uniform
from pandas import Series, DataFrame
from dateutil.parser import parse
from datetime import datetime, timedelta
import ds4ml
from ds4ml import utils
class Attribute(Series):
_epoch = datetime(1970, 1, 1) # for datetime handling
def __init__(self, data, name=None, dtype=None, index=None, copy=False,
fastpath=False, categorical=False):
"""
A Series with extra information, e.g. categorical.
Parameters
----------
categorical : bool
set categorical label for attribute. If categorical, this attribute
takes on a limited and fixed number of possible values. Examples:
blood type, gender.
"""
Series.__init__(self, data, name=name, dtype=dtype, index=index,
copy=copy, fastpath=fastpath)
# bins can be int (size of histogram bins), str (as algorithm name),
self._bins = ds4ml.params['attribute.bins']
self._min = None
self._max = None
self._step = None
# probability distribution (pr)
self.bins = None
self.prs = None
from pandas.api.types import infer_dtype
        # atype: data type used to handle different kinds of attributes in
        # data synthesis; supported: integer, float, string, datetime.
self.atype = infer_dtype(self, skipna=True)
if self.atype == 'integer':
pass
elif self.atype == 'floating' or self.atype == 'mixed-integer-float':
self.atype = 'float'
elif self.atype in ['string', 'mixed-integer', 'mixed']:
self.atype = 'string'
if all(map(utils.is_datetime, self._values)):
self.atype = 'datetime'
# fill the missing values with the most frequent value
self.fillna(self.mode()[0], inplace=True)
# special handling for datetime attribute
if self.atype == 'datetime':
self.update(self.map(self._to_seconds).map(self._date_formatter))
if self.atype == 'float':
self._decimals = self.decimals()
# how to define the attribute is categorical.
self.categorical = categorical or (
self.atype == 'string' and not self.is_unique)
self._set_domain()
self._set_distribution()
# handling functions for datetime attribute
def _to_seconds(self, timestr):
return int((parse(timestr) - self._epoch).total_seconds())
def _date_formatter(self, seconds):
date = self._epoch + timedelta(seconds=seconds)
return '%d/%d/%d' % (date.month, date.day, date.year)
# Take pandas.Series as manipulation result.
@property
def _constructor(self):
return Series
@property
def _constructor_expanddim(self):
from ds4ml.dataset import DataSet
return DataSet
@property
def is_numerical(self):
return self.atype == 'integer' or self.atype == 'float'
@property
def domain(self):
"""
Return attribute's domain, which can be a list of values for categorical
attribute, and an interval with min/max value for non-categorical
attribute.
"""
if self.categorical:
return self.bins
else:
return [self._min, self._max]
@domain.setter
def domain(self, domain: list):
"""
Set attribute's domain, includes min, max, frequency, or distribution.
Generally, the domain of one attribute can be calculated automatically.
This method can be manually called for specific purposes, e.g. compare
two same attributes based on same domain.
Parameters
----------
domain : list
domain of one attribute. For numerical or datetime attributes, it
should be a list of two elements [min, max]; For categorical
            attributes, it should be a list of potential values of this attribute.
"""
        # if an attribute is both numerical and categorical and the domain's
        # length is bigger than 2, treat it as categorical, e.g. zip code.
if self.atype == 'datetime':
domain = list(map(self._to_seconds, domain))
if (self.is_numerical and self.categorical and len(domain) > 2) or (
self.categorical):
self._min = min(domain)
self._max = max(domain)
self.bins = np.array(domain)
elif self.is_numerical:
self._min, self._max = domain
self._step = (self._max - self._min) / self._bins
self.bins = np.array([self._min, self._max])
elif self.atype == 'string':
lengths = [len(str(i)) for i in domain]
self._min = min(lengths)
self._max = max(lengths)
self.bins = np.array(domain)
self._set_distribution()
def _set_domain(self):
"""
Compute domain (min, max, distribution bins) from input data
"""
if self.atype == 'string':
self._items = self.astype(str).map(len)
self._min = int(self._items.min())
self._max = int(self._items.max())
if self.categorical:
self.bins = self.unique()
else:
self.bins = np.array([self._min, self._max])
elif self.atype == 'datetime':
self.update(self.map(self._to_seconds))
if self.categorical:
self.bins = self.unique()
else:
self._min = float(self.min())
self._max = float(self.max())
self.bins = np.array([self._min, self._max])
self._step = (self._max - self._min) / self._bins
else:
self._min = float(self.min())
self._max = float(self.max())
if self.categorical:
self.bins = self.unique()
else:
self.bins = np.array([self._min, self._max])
self._step = (self._max - self._min) / self._bins
def _set_distribution(self):
if self.categorical:
counts = self.value_counts()
for value in set(self.bins) - set(counts.index):
counts[value] = 0
counts.sort_index(inplace=True)
if self.atype == 'datetime':
counts.index = list(map(self._date_formatter, counts.index))
self._counts = counts.values
self.prs = utils.normalize_distribution(counts)
self.bins = np.array(counts.index)
else:
# Note: hist, edges = numpy.histogram(), all but the last bin
# is half-open. If bins is 20, then len(hist)=20, len(edges)=21
if self.atype == 'string':
hist, edges = np.histogram(self._items,
bins=self._bins)
else:
hist, edges = np.histogram(self, bins=self._bins,
range=(self._min, self._max))
self.bins = edges[:-1] # Remove the last bin edge
self._counts = hist
self.prs = utils.normalize_distribution(hist)
if self.atype == 'integer':
self._min = int(self._min)
self._max = int(self._max)
def counts(self, bins=None, normalize=True):
"""
Return an array of counts (or normalized density) of unique values.
This function works with `attribute.bins`. Combination of both are
like `Series.value_counts`. The parameter `bins` can be none, or a list.
"""
if bins is None:
return self._counts
if self.categorical:
if self.atype == 'datetime':
bins = list(map(self._to_seconds, bins))
counts = self.value_counts()
for value in set(bins) - set(counts.index):
counts[value] = 0
if normalize:
return np.array([round(counts.get(b)/sum(counts) * 100, 2)
for b in bins])
else:
return np.array([counts.get(b) for b in bins])
else:
if len(bins) == 1:
return np.array([self.size])
            hist, _ = np.histogram(self, bins=bins)
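# Illustrative check (not part of ds4ml): numpy.histogram returns one more bin
# edge than it returns counts, which is why _set_distribution above keeps only
# edges[:-1] as `bins`.
_demo_hist, _demo_edges = np.histogram([1, 2, 2, 3], bins=2)
assert len(_demo_edges) == len(_demo_hist) + 1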
import unittest
import glob
import os
from os.path import join, dirname
from datetime import date
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from insar import timeseries
from apertools import sario
class TestInvertSbas(unittest.TestCase):
def setUp(self):
# self.jsonfile = tempfile.NamedTemporaryFile(mode='w+')
self.igram_path = join(dirname(__file__), "data", "sbas_test")
self.slclist_path = join(self.igram_path, "slclist")
self.ifglist_path = join(self.igram_path, "ifglist")
self.actual_time_diffs = np.array([2, 6, 4])
def tearDown(self):
for f in glob.glob(join(self.igram_path, "*flat*")):
os.remove(f)
def test_time_diff(self):
slclist = sario.find_geos(self.slclist_path)
time_diffs = timeseries.find_time_diffs(slclist)
assert_array_equal(self.actual_time_diffs, time_diffs)
def test_read_slclist(self):
slclist = sario.find_geos(self.slclist_path)
expected = [
date(2018, 4, 20),
date(2018, 4, 22),
date(2018, 4, 28),
date(2018, 5, 2),
]
self.assertEqual(slclist, expected)
def test_read_ifglist(self):
ifglist = sario.find_igrams(self.ifglist_path)
expected = [
(date(2018, 4, 20), date(2018, 4, 22)),
(date(2018, 4, 20), date(2018, 4, 28)),
(date(2018, 4, 22), date(2018, 4, 28)),
(date(2018, 4, 22), date(2018, 5, 2)),
(date(2018, 4, 28), date(2018, 5, 2)),
]
self.assertEqual(ifglist, expected)
expected = [
"data/sbas_test/20180420_20180422.int",
"data/sbas_test/20180420_20180428.int",
"data/sbas_test/20180422_20180428.int",
"data/sbas_test/20180422_20180502.int",
"data/sbas_test/20180428_20180502.int",
]
igram_files = sario.find_igrams(self.ifglist_path, parse=False)
# Remove all but last part to ignore where we are running this
igram_files = [os.sep.join(f.split(os.sep)[-3:]) for f in igram_files]
self.assertEqual(igram_files, expected)
def test_build_A_matrix(self):
slclist = sario.find_geos(self.slclist_path)
ifglist = sario.find_igrams(self.ifglist_path)
expected_A = np.array(
[
[1, 0, 0],
[0, 1, 0],
[-1, 1, 0],
[-1, 0, 1],
[0, -1, 1],
]
)
A = timeseries.build_A_matrix(slclist, ifglist)
        assert_array_equal(expected_A, A)
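# Illustrative sketch (an assumption about the convention, not taken from the
# insar docs): in expected_A above, each interferogram row carries -1 for its
# earlier date and +1 for its later date, with the first SAR date serving as
# the (dropped) reference column.
def _example_build_A(slc_dates, igram_pairs):
    import numpy as np
    later_dates = slc_dates[1:]  # reference date column is dropped
    A = np.zeros((len(igram_pairs), len(later_dates)), dtype=int)
    for row, (early, late) in enumerate(igram_pairs):
        if early != slc_dates[0]:
            A[row, later_dates.index(early)] = -1
        A[row, later_dates.index(late)] = 1
    return A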
import numpy as np
from numpy.linalg import det
from scipy.optimize import fsolve
from scipy.stats import norm
import itertools
from .constants import *
def check_condition(state, condition):
# Define a success condition for each state, e.g., mutual exclusivity.
if condition==EXCLUSIVITY:
if sum(state)==1:
return True
else:
return False
elif condition==ANY_CO_OCCURRENCE:
if sum(state)>1:
return True
else:
return False
elif condition==ALL_CO_OCCURRENCE:
if sum(state)==len(state):
return True
else:
return False
else:
raise NotImplementedError('{} not implemented'.format(condition))
def enumeration(k, condition):
# Enumerate the states for the observed variables and identify the indices for the states that
# satisfy the given condition, e.g., mutual exclusivity.
states = list(itertools.product([0, 1], repeat=k))
indices = []
for i, state in enumerate(states):
a = [j for j, s in enumerate(state) if s==1]
if check_condition(state, condition):
a += [k]
indices.append(a)
# Identify the indices for each term of the gradient vector and the Hessian matrix.
gradient_indices = []
for i in range(k+1):
b = [j for j, a in enumerate(indices) if i in a]
gradient_indices.append(b)
hessian_indices = []
for i in range(k+1):
b = []
for j in range(k+1):
c = [l for l, a in enumerate(indices) if i in a and j in a]
b.append(c)
hessian_indices.append(b)
return states, indices, gradient_indices, hessian_indices
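# Worked example (not part of the original module): for k = 2 observed
# variables under the EXCLUSIVITY condition,
#   states           -> [(0, 0), (0, 1), (1, 0), (1, 1)]
#   indices          -> [[], [1, 2], [0, 2], [0, 1]]  (successes gain the extra index k = 2)
#   gradient_indices -> [[2, 3], [1, 3], [1, 2]]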
def saddlepoint(observed_t, observed_y, probabilities, condition='exclusivity'):
# Find the dimensions of the observations.
k, n = np.shape(probabilities)
# Enumerate the states for the observed variables and identify indices for the terms.
states, indices, gradient_indices, hessian_indices = enumeration(k, condition)
# Collect the observations and perform the continuity correction for t.
y = np.zeros(k+1)
y[0:k] = observed_y
y[k] = observed_t-0.5
# Precompute the products of the success and failure probabilities.
p = np.zeros((2, k, n))
p[0] = 1.0-np.array(probabilities)
p[1] = np.array(probabilities)
w = np.zeros((2**k, n))
for i, state in enumerate(states):
w[i, :] = np.product(p[state, range(k), :], axis=0)
# Define the moment generating functions and cumulant generating functions. These functions
# use the above constants.
def compute_terms(x):
terms = np.zeros((2**k, n))
for i, s in enumerate(indices):
            terms[i, :] = np.exp(np.sum(x[s]))
# Authors: <NAME> <<EMAIL>>
# <NAME>
#
# License: BSD (3-clause)
import logging
import warnings
import numpy as np
from scipy import linalg
from numpy.linalg import pinv
from .asr_utils import (geometric_median, fit_eeg_distribution, yulewalk,
yulewalk_filter, ma_filter, block_covariance)
class ASR():
"""Artifact Subspace Reconstruction.
Artifact subspace reconstruction (ASR) is an automated, online,
component-based artifact removal method for removing transient or
large-amplitude artifacts in multi-channel EEG recordings [1]_.
Parameters
----------
sfreq : float
Sampling rate of the data, in Hz.
cutoff: float
Standard deviation cutoff for rejection. X portions whose variance
is larger than this threshold relative to the calibration data are
considered missing data and will be removed. The most aggressive value
        that can be used without losing too much EEG is 2.5; more conservative
        values in the range 20 - 30 are recommended.
Defaults to 20.
blocksize : int
Block size for calculating the robust data covariance and thresholds,
in samples; allows to reduce the memory and time requirements of the
robust estimators by this factor (down to Channels x Channels x Samples
x 16 / Blocksize bytes) (default=100).
win_len : float
Window length (s) that is used to check the data for artifact content.
This is ideally as long as the expected time scale of the artifacts but
not shorter than half a cycle of the high-pass filter that was used
(default=0.5).
win_overlap : float
Window overlap fraction. The fraction of two successive windows that
overlaps. Higher overlap ensures that fewer artifact portions are going
to be missed, but is slower (default=0.66).
max_dropout_fraction : float
Maximum fraction of windows that can be subject to signal dropouts
(e.g., sensor unplugged), used for threshold estimation (default=0.1).
min_clean_fraction : float
Minimum fraction of windows that need to be clean, used for threshold
estimation (default=0.25).
ab : 2-tuple | None
Coefficients (A, B) of an IIR filter that is used to shape the
spectrum of the signal when calculating artifact statistics. The
output signal does not go through this filter. This is an optional way
to tune the sensitivity of the algorithm to each frequency component
of the signal. The default filter is less sensitive at alpha and beta
frequencies and more sensitive at delta (blinks) and gamma (muscle)
frequencies. Defaults to None.
max_bad_chans : float
The maximum number or fraction of bad channels that a retained window
may still contain (more than this and it is removed). Reasonable range
is 0.05 (very clean output) to 0.3 (very lax cleaning of only coarse
artifacts) (default=0.2).
method : {'riemann', 'euclid'}
Method to use. If riemann, use the riemannian-modified version of
ASR [2]_. Currently, only euclidean ASR is supported. Defaults to
"euclid".
Attributes
----------
    sfreq : float
        Sampling rate of the data, in Hz.
cutoff: float
Standard deviation cutoff for rejection.
blocksize : int
Block size for calculating the robust data covariance and thresholds.
win_len : float
Window length (s) that is used to check the data for artifact content.
win_overlap : float
Window overlap fraction.
max_dropout_fraction : float
Maximum fraction of windows that can be subject to signal dropouts.
min_clean_fraction : float
Minimum fraction of windows.
max_bad_chans : float
The maximum fraction of bad channels.
method : {'riemann', 'euclid'}
Method to use.
A, B: arrays
Coefficients of an IIR filter that is used to shape the spectrum of the
signal when calculating artifact statistics. The output signal does not
go through this filter. This is an optional way to tune the sensitivity
of the algorithm to each frequency component of the signal. The default
filter is less sensitive at alpha and beta frequencies and more
sensitive at delta (blinks) and gamma (muscle) frequencies.
M : array, shape=(channels, channels)
The mixing matrix to fit ASR data.
    T : array, shape=(channels, channels)
        The threshold matrix used to fit ASR data.
References
----------
.. [1] <NAME>., & <NAME>. (2016). U.S. Patent Application No.
14/895,440. https://patents.google.com/patent/US20160113587A1/en
.. [2] <NAME>., <NAME>., <NAME>., & <NAME>.
(2019). A Riemannian Modification of Artifact Subspace Reconstruction
for EEG Artifact Handling. Frontiers in Human Neuroscience, 13.
https://doi.org/10.3389/fnhum.2019.00141
"""
def __init__(self, sfreq, cutoff=20, blocksize=100, win_len=0.5,
win_overlap=0.66, max_dropout_fraction=0.1,
min_clean_fraction=0.25, ab=None, max_bad_chans=0.1,
method="euclid"):
# set attributes
self.sfreq = sfreq
self.cutoff = cutoff
self.blocksize = blocksize
self.win_len = win_len
self.win_overlap = win_overlap
self.max_dropout_fraction = max_dropout_fraction
self.min_clean_fraction = min_clean_fraction
self.max_bad_chans = max_bad_chans
self.method = "euclid" # NOTE: riemann is not yet available
self._fitted = False
# set default yule-walker filter
if ab is None:
yw_f = np.array([0, 2, 3, 13, 16, 40,
np.minimum(80.0, (self.sfreq / 2.0) - 1.0),
self.sfreq / 2.0]) * 2.0 / self.sfreq
yw_m = np.array([3, 0.75, 0.33, 0.33, 1, 1, 3, 3])
self.B, self.A = yulewalk(8, yw_f, yw_m)
else:
self.A, self.B = ab
self._reset()
def _reset(self):
"""Reset state variables."""
self.M = None
self.T = None
# TODO: The following parameters are effectively not used. Still,
# they can be set manually via asr.transform(return_states=True)
self.R = None
self.carry = None
self.Zi = None
self.cov = None
self._fitted = False
def fit(self, raw, picks="eeg", start=0, stop=None,
return_clean_window=False):
"""Calibration for the Artifact Subspace Reconstruction method.
        The input to this method is a multi-channel time series of calibration
        data. In typical uses the calibration data is clean resting EEG data of
        ca. 1 minute duration (can also be longer). One can also use on-task
        data if the fraction of artifact content is below the breakdown
point of the robust statistics used for estimation (50% theoretical,
~30% practical). If the data has a proportion of more than 30-50%
artifacts then bad time windows should be removed beforehand. This
data is used to estimate the thresholds that are used by the ASR
processing function to identify and remove artifact components.
The calibration data must have been recorded for the same cap design
from which data for cleanup will be recorded, and ideally should be
from the same session and same subject, but it is possible to reuse
the calibration data from a previous session and montage to the
extent that the cap is placed in the same location (where loss in
accuracy is more or less proportional to the mismatch in cap
placement).
Parameters
----------
raw : instance of mne.io.Raw
Instance of mne.io.Raw to be used for fitting the ASR.
The calibration data should have been high-pass filtered (for
example at 0.5Hz or 1Hz using a Butterworth IIR filter), and be
reasonably clean not less than 30 seconds (this method is
typically used with 1 minute or more).
picks : str | list | slice | None
Channels used to fit the ASR. All channels should be of the same
type (e.g. "eeg", "grads"). Slices and lists of integers will
be interpreted as channel indices. In lists, channel
            name strings (e.g., ['MEG0111', 'MEG2623']) will pick the given
channels. Note that channels in info['bads'] will be included if
their names or indices are explicitly provided. Defaults to "eeg".
start : int
The first sample to use for fitting the data. Defaults to 0.
stop : int | None
The last sample to use for fitting the data. If `None`, all
samples after `start` will be used for fitting. Defaults to None.
return_clean_window : Bool
If True, the method will return the variables `clean` (the cropped
dataset which was used to fit the ASR) and `sample_mask` (a
logical mask of which samples were included/excluded from fitting
the ASR). Defaults to False.
Returns
-------
clean : array, shape=(n_channels, n_samples)
The cropped version of the dataset which was used to calibrate
the ASR. This array is a result of the `clean_windows` function
and no ASR was applied to it.
sample_mask : boolean array, shape=(1, n_samples)
Logical mask of the samples which were used to train the ASR.
"""
# extract the data
X = raw.get_data(picks=picks, start=start, stop=stop)
# Find artifact-free windows first
clean, sample_mask = clean_windows(
X,
sfreq=self.sfreq,
win_len=self.win_len,
win_overlap=self.win_overlap,
max_bad_chans=self.max_bad_chans,
min_clean_fraction=self.min_clean_fraction,
max_dropout_fraction=self.max_dropout_fraction)
# Perform calibration
self.M, self.T = asr_calibrate(
clean,
sfreq=self.sfreq,
cutoff=self.cutoff,
blocksize=self.blocksize,
win_len=self.win_len,
win_overlap=self.win_overlap,
max_dropout_fraction=self.max_dropout_fraction,
min_clean_fraction=self.min_clean_fraction,
ab=(self.A, self.B),
method=self.method)
self._fitted = True
# return data if required
if return_clean_window:
return clean, sample_mask
def transform(self, raw, picks="eeg", lookahead=0.25, stepsize=32,
maxdims=0.66, return_states=False, mem_splits=3):
"""Apply Artifact Subspace Reconstruction.
Parameters
----------
raw : instance of mne.io.Raw
Instance of mne.io.Raw to be transformed by the ASR.
picks : str | list | slice | None
Channels to be transformed by the ASR. Should be the same set of
channels as used by `ASR.fit()`. All channels should be of the
same type (e.g. "eeg", "grads"). Slices and lists of integers will
be interpreted as channel indices. In lists, channel
name strings (e.g., ['MEG0111', 'MEG2623'] will pick the given
channels. Note that channels in info['bads'] will be included if
their names or indices are explicitly provided. Defaults to "eeg".
lookahead : float
Amount of look-ahead that the algorithm should use (in seconds).
This value should be between 0 (no lookahead) and WindowLength/2
(optimal lookahead). The recommended value is WindowLength/2.
Default: 0.25
Note: Other than in `asr_process`, the signal will be readjusted
to eliminate any temporal jitter and automatically readjust it to
the correct time points. Zero-padding will be applied to the last
`lookahead` portion of the data, possibly resulting in inaccuracies
for the final `lookahead` seconds of the recording.
stepsize : int
The steps in which the algorithm will be updated. The larger this
is, the faster the algorithm will be. The value must not be larger
than WindowLength * SamplingRate. The minimum value is 1 (update
for every sample) while a good value would be sfreq//3. Note that
an update is always performed also on the first and last sample of
the data chunk. Default: 32
        maxdims : float, int
Maximum dimensionality of artifacts to remove. This parameter
denotes the maximum number of dimensions which can be removed from
each segment. If larger than 1, `int(max_dims)` will denote the
maximum number of dimensions removed from the data. If smaller
than 1, `max_dims` describes a fraction of total dimensions.
Defaults to 0.66.
return_states : bool
If True, returns a dict including the updated states {"M":M,
"T":T, "R":R, "Zi":Zi, "cov":cov, "carry":carry}. Defaults to
False.
mem_splits : int
Split the array in `mem_splits` segments to save memory.
Returns
-------
out : array, shape=(n_channels, n_samples)
Filtered data.
"""
# extract the data
X = raw.get_data(picks=picks)
# add lookahead padding at the end
lookahead_samples = int(self.sfreq * lookahead)
X = np.concatenate([X,
np.zeros([X.shape[0], lookahead_samples])],
axis=1)
# apply ASR
X = asr_process(X, self.sfreq, self.M, self.T, self.win_len,
lookahead, stepsize, maxdims, (self.A, self.B),
self.R, self.Zi, self.cov, self.carry,
return_states, self.method, mem_splits)
# remove lookahead portion from start
X = X[:, lookahead_samples:]
# Return a modifier raw instance
raw = raw.copy()
raw.apply_function(lambda x: X, picks=picks,
channel_wise=False)
return raw
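# Illustrative usage sketch (hypothetical `raw_filt` object; not part of the
# original module).  ASR is fit on high-pass filtered calibration data and then
# applied to the same (or a comparable) recording:
#
#     asr = ASR(sfreq=raw_filt.info["sfreq"], cutoff=20)
#     asr.fit(raw_filt, picks="eeg")
#     raw_clean = asr.transform(raw_filt, picks="eeg", lookahead=0.25)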
def asr_calibrate(X, sfreq, cutoff=5, blocksize=100, win_len=0.5,
win_overlap=0.66, max_dropout_fraction=0.1,
min_clean_fraction=0.25, ab=None, method='euclid'):
"""Calibration function for the Artifact Subspace Reconstruction method.
    This function can be used if you intend to apply ASR to a simple numpy
array instead of a mne.io.Raw object. It is equivalent to the MATLAB
implementation of asr_calibrate (except for some small differences
introduced by solvers for the eigenspace functions etc).
The input to this data is a multi-channel time series of calibration data.
In typical uses the calibration data is clean resting EEG data of ca. 1
minute duration (can also be longer). One can also use on-task data if the
fraction of artifact content is below the breakdown point of the robust
statistics used for estimation (50% theoretical, ~30% practical). If the
data has a proportion of more than 30-50% artifacts then bad time windows
should be removed beforehand. This data is used to estimate the thresholds
that are used by the ASR processing function to identify and remove
artifact components.
The calibration data must have been recorded for the same cap design from
which data for cleanup will be recorded, and ideally should be from the
same session and same subject, but it is possible to reuse the calibration
data from a previous session and montage to the extent that the cap is
placed in the same location (where loss in accuracy is more or less
proportional to the mismatch in cap placement).
The calibration data should have been high-pass filtered (for example at
0.5Hz or 1Hz using a Butterworth IIR filter).
Parameters
----------
X : array, shape=(n_channels, n_samples)
*zero-mean* (e.g., high-pass filtered) and reasonably clean EEG of not
much less than 30 seconds (this method is typically used with 1 minute
or more).
sfreq : float
Sampling rate of the data, in Hz.
cutoff: float
Standard deviation cutoff for rejection. X portions whose variance
is larger than this threshold relative to the calibration data are
considered missing data and will be removed. The most aggressive value
that can be used without losing too much EEG is 2.5. Defaults to 5
(according to the original default in EEGLab's `clean_rawdata`).
blocksize : int
Block size for calculating the robust data covariance and thresholds,
in samples; allows to reduce the memory and time requirements of the
robust estimators by this factor (down to n_chans x n_chans x
n_samples x 16 / blocksize bytes) (default=100).
win_len : float
Window length that is used to check the data for artifact content.
This is ideally as long as the expected time scale of the artifacts
but short enough to allow for several 1000 windows to compute
statistics over (default=0.5).
win_overlap : float
Window overlap fraction. The fraction of two successive windows that
overlaps. Higher overlap ensures that fewer artifact portions are
going to be missed, but is slower (default=0.66).
max_dropout_fraction : float
Maximum fraction of windows that can be subject to signal dropouts
(e.g., sensor unplugged), used for threshold estimation (default=0.1).
min_clean_fraction : float
Minimum fraction of windows that need to be clean, used for threshold
estimation (default=0.25).
ab : 2-tuple | None
Coefficients (A, B) of an IIR filter that is used to shape the
spectrum of the signal when calculating artifact statistics. The
output signal does not go through this filter. This is an optional way
to tune the sensitivity of the algorithm to each frequency component
of the signal. The default filter is less sensitive at alpha and beta
frequencies and more sensitive at delta (blinks) and gamma (muscle)
frequencies. Defaults to None.
method : {'euclid', 'riemann'}
Metric to compute the covariance matrix average. For now, only
euclidean ASR is supported.
Returns
-------
M : array
Mixing matrix.
T : array
Threshold matrix.
"""
if method == "riemann":
warnings.warn("Riemannian ASR is not yet supported. Switching back to"
" Euclidean ASR.")
        method = "euclid"
logging.debug('[ASR] Calibrating...')
# set number of channels and number of samples
[nc, ns] = X.shape
# filter the data
X, _zf = yulewalk_filter(X, sfreq, ab=ab)
# window length for calculating thresholds
N = int(np.round(win_len * sfreq))
# get block covariances
U = block_covariance(X, window=blocksize)
# get geometric median for each block
# Note: riemann mode is not yet supported, else this could be:
# Uavg = pyriemann.utils.mean_covariance(U, metric='riemann')
Uavg = geometric_median(U.reshape((-1, nc * nc)) / blocksize)
Uavg = Uavg.reshape((nc, nc))
# get the mixing matrix M
M = linalg.sqrtm(np.real(Uavg))
    # sort to get the eigenvectors/eigenvalues in ascending order
# riemann is not yet supported, else this could be PGA/nonlinear eigenvs
D, Vtmp = linalg.eigh(M)
V = Vtmp[:, np.argsort(D)] # I think numpy sorts them automatically
# get the threshold matrix T
x = np.abs(np.dot(V.T, X))
offsets = np.int_(np.arange(0, ns - N, np.round(N * (1 - win_overlap))))
# go through all the channels and fit the EEG distribution
mu = np.zeros(nc)
sig = np.zeros(nc)
for ichan in reversed(range(nc)):
rms = x[ichan, :] ** 2
Y = []
for o in offsets:
            Y.append(np.sqrt(np.sum(rms[o:o + N]) / N))  # RMS amplitude per window
import sys
sys.path.append('../../')
from collections import Counter
from sklearn.linear_model import LinearRegression
import os
import numpy as np
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
from nfp.preprocessing import MolPreprocessor, GraphSequence
import gzip
import pickle
import pandas as pd
# Define Keras model
import keras
import keras.backend as K
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from keras.layers import (Input, Embedding, Dense, BatchNormalization, Dropout,
Concatenate, Multiply, Add)
from keras.models import Model, load_model
from nfp.layers import (MessageLayer, GRUStep, Squeeze, EdgeNetwork,
ReduceAtomToMol, ReduceBondToAtom,
GatherAtomToBond, ReduceAtomToPro)
from nfp.models import GraphModel
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--restart', action='store_true')
args = parser.parse_args()
train = pd.read_pickle('train.pkl.gz')
valid = pd.read_pickle('valid.pkl.gz')
y_train = train.Shift.values
y_valid = valid.Shift.values
def rbf_expansion(distances, mu=0, delta=0.04, kmax=256):
k = np.arange(0, kmax)
logits = -(np.atleast_2d(distances).T - (-mu + delta * k))**2 / delta
return np.exp(logits)
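# Illustrative check (not part of the original script): each scalar distance is
# expanded onto kmax Gaussian basis functions centred at -mu + delta * k, so a
# batch of n distances becomes an (n, kmax) feature matrix.
assert rbf_expansion(np.array([1.0, 2.0])).shape == (2, 256)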
def atomic_number_tokenizer(atom):
return atom.GetNumRadicalElectrons()
def _compute_stacked_offsets(sizes, repeats):
return np.repeat(np.cumsum(np.hstack([0, sizes[:-1]])), repeats)
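# Illustrative check (not part of the original script): the offsets repeat the
# cumulative sizes once per atom, e.g. sizes [2, 3] with repeats [2, 3] give
# [0, 0, 2, 2, 2].
assert np.array_equal(
    _compute_stacked_offsets(np.array([2, 3]), np.array([2, 3])),
    np.array([0, 0, 2, 2, 2]))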
class RBFSequence(GraphSequence):
def process_data(self, batch_data):
batch_data['distance_rbf'] = rbf_expansion(batch_data['distance'])
offset = _compute_stacked_offsets(
batch_data['n_pro'], batch_data['n_atom'])
        offset = np.where(batch_data['atom_index']>=0, offset, 0)
# python problem4.py --u 230
# http://fourier.eng.hmc.edu/e161/lectures/ColorProcessing/node3.html
import numpy as np
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-u", "--u", type=int, required=True,
help="Value of U between 0 and 255")
args = vars(ap.parse_args())
def angle_between(p1, p2):
    ang1 = np.arctan2(*p1[::-1])
import os
import glob
import hashlib
import gc
import time
import numpy as np
import requests
import contextlib
from tqdm import tqdm
import torch
import random
from logging import log
from os.path import join as pjoin, exists as pexists
import json
import pickle
import pandas as pd
from .gams.utils import extract_GAM, bin_data
def download(url, filename, delete_if_interrupted=True, chunk_size=4096):
""" saves file from url to filename with a fancy progressbar """
try:
with open(filename, "wb") as f:
print("Downloading {} > {}".format(url, filename))
response = requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None: # no content length header
f.write(response.content)
else:
total_length = int(total_length)
with tqdm(total=total_length) as progressbar:
for data in response.iter_content(chunk_size=chunk_size):
if data: # filter-out keep-alive chunks
f.write(data)
progressbar.update(len(data))
except Exception as e:
if delete_if_interrupted:
print("Removing incomplete download {}.".format(filename))
os.remove(filename)
raise e
return filename
def iterate_minibatches(*tensors, batch_size, shuffle=True, epochs=1,
allow_incomplete=True, callback=lambda x:x):
indices = np.arange(len(tensors[0]))
upper_bound = int((np.ceil if allow_incomplete else np.floor) (len(indices) / batch_size)) * batch_size
epoch = 0
while True:
if shuffle:
np.random.shuffle(indices)
for batch_start in callback(range(0, upper_bound, batch_size)):
batch_ix = indices[batch_start: batch_start + batch_size]
batch = [tensor[batch_ix] if tensor is not None else None
for tensor in tensors]
yield batch if len(tensors) > 1 else batch[0]
epoch += 1
if epoch >= epochs:
break
def process_in_chunks(function, *args, batch_size, out=None, **kwargs):
"""
Computes output by applying batch-parallel function to large data tensor in chunks
:param function: a function(*[x[indices, ...] for x in args]) -> out[indices, ...]
:param args: one or many tensors, each [num_instances, ...]
:param batch_size: maximum chunk size processed in one go
:param out: memory buffer for out, defaults to torch.zeros of appropriate size and type
:returns: function(data), computed in a memory-efficient way
"""
total_size = args[0].shape[0]
first_output = function(*[x[0: batch_size] for x in args])
output_shape = (total_size,) + tuple(first_output.shape[1:])
if out is None:
out = torch.zeros(*output_shape, dtype=first_output.dtype, device=first_output.device,
layout=first_output.layout, **kwargs)
out[0: batch_size] = first_output
for i in range(batch_size, total_size, batch_size):
batch_ix = slice(i, min(i + batch_size, total_size))
out[batch_ix] = function(*[x[batch_ix] for x in args])
return out
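# Illustrative usage sketch (not part of the original module): applying an
# elementwise function in chunks gives the same result as a single call while
# bounding how much is computed per call.
_demo_x = torch.randn(1000, 4)
_demo_out = process_in_chunks(torch.sigmoid, _demo_x, batch_size=256)
assert torch.allclose(_demo_out, torch.sigmoid(_demo_x))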
def check_numpy(x):
""" Makes sure x is a numpy array """
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
    x = np.asarray(x)
import math
import os
import sys
import time
from numpy.random import seed
import numpy as np
# to deal with numpy randomness
# seed(1234)
import tensorflow.compat.v1 as tf
# added to deal with randomness
tf.set_random_seed(1234)
import tqdm
from numpy import random
from utils import *
def get_cosine_similarity(embeddings1, embeddings2, node):
try:
vector1 = embeddings1[node]
vector2 = embeddings2[node]
return np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))
except:
return 2+random.random()
def get_batches(pairs, neighbors, batch_size):
n_batches = (len(pairs) + (batch_size - 1)) // batch_size
for idx in range(n_batches):
x, y, t, neigh = [], [], [], []
for i in range(batch_size):
index = idx * batch_size + i
if index >= len(pairs):
break
x.append(pairs[index][0])
y.append(pairs[index][1])
t.append(pairs[index][2])
neigh.append(neighbors[pairs[index][0]])
yield (np.array(x).astype(np.int32), np.array(y).reshape(-1, 1).astype(np.int32), np.array(t).astype(np.int32), np.array(neigh).astype(np.int32))
def train_model(network_data, log_name, store_file, all_walks):
vocab, index2word = generate_vocab(all_walks)
train_pairs = generate_pairs(all_walks, vocab, args.window_size)
edge_types = list(network_data.keys())
print('edge types: '+str(list(network_data.keys())))
num_nodes = len(index2word)
edge_type_count = len(edge_types)
print('edge types '+ str(edge_types))
epochs = args.epoch
batch_size = args.batch_size
embedding_size = args.dimensions # Dimension of the embedding vector.
embedding_u_size = args.edge_dim
u_num = edge_type_count
num_sampled = args.negative_samples # Number of negative examples to sample.
dim_a = args.att_dim
att_head = 1
neighbor_samples = args.neighbor_samples
neighbors = [[[] for __ in range(edge_type_count)] for _ in range(num_nodes)]
for r in range(edge_type_count):
g = network_data[edge_types[r]]
for (x, y) in g:
ix = vocab[x].index
iy = vocab[y].index
neighbors[ix][r].append(iy)
neighbors[iy][r].append(ix)
for i in range(num_nodes):
if len(neighbors[i][r]) == 0:
neighbors[i][r] = [i] * neighbor_samples
elif len(neighbors[i][r]) < neighbor_samples:
neighbors[i][r].extend(list(np.random.choice(neighbors[i][r], size=neighbor_samples-len(neighbors[i][r]))))
elif len(neighbors[i][r]) > neighbor_samples:
neighbors[i][r] = list(np.random.choice(neighbors[i][r], size=neighbor_samples))
graph = tf.Graph()
with graph.as_default():
global_step = tf.Variable(0, name='global_step', trainable=False)
# Parameters to learn
node_embeddings = tf.Variable(tf.random.uniform([num_nodes, embedding_size], -1.0, 1.0))
print(node_embeddings.get_shape().as_list())
node_type_embeddings = tf.Variable(tf.random.uniform([num_nodes, u_num, embedding_u_size], -1.0, 1.0))
print(node_type_embeddings.get_shape().as_list())
trans_weights = tf.Variable(tf.truncated_normal([edge_type_count, embedding_u_size, embedding_size // att_head], stddev=1.0 / math.sqrt(embedding_size)))
trans_weights_s1 = tf.Variable(tf.truncated_normal([edge_type_count, embedding_u_size, dim_a], stddev=1.0 / math.sqrt(embedding_size)))
trans_weights_s2 = tf.Variable(tf.truncated_normal([edge_type_count, dim_a, att_head], stddev=1.0 / math.sqrt(embedding_size)))
nce_weights = tf.Variable(tf.truncated_normal([num_nodes, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([num_nodes]))
# Input data
train_inputs = tf.placeholder(tf.int32, shape=[None])
train_labels = tf.placeholder(tf.int32, shape=[None, 1])
train_types = tf.placeholder(tf.int32, shape=[None])
node_neigh = tf.placeholder(tf.int32, shape=[None, edge_type_count, neighbor_samples])
# Look up embeddings for nodes
node_embed = tf.nn.embedding_lookup(node_embeddings, train_inputs)
# to get neighbors embeddings for all nodes in all types of edges: embedding_lookup(params, ids)
node_embed_neighbors = tf.nn.embedding_lookup(node_type_embeddings, node_neigh)
if args.aggregator == 'max-pooling':
node_embed_tmp = tf.concat([tf.reshape(tf.slice(node_embed_neighbors, [0, i, 0, i, 0], [-1, 1, -1, 1, -1]), [1, -1, neighbor_samples, embedding_u_size]) for i in range(edge_type_count)], axis=0)
node_type_embed = tf.transpose(tf.squeeze(tf.nn.max_pool(node_embed_tmp, ksize=[1, 1, neighbor_samples, 1], strides=[1, 1, neighbor_samples, 1], padding='VALID', data_format='NHWC'), [2]), perm=[1,0,2])
trans_w = tf.nn.embedding_lookup(trans_weights, train_types)
trans_w_s1 = tf.nn.embedding_lookup(trans_weights_s1, train_types)
trans_w_s2 = tf.nn.embedding_lookup(trans_weights_s2, train_types)
attention = tf.reshape(tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh(tf.matmul(node_type_embed, trans_w_s1)), trans_w_s2), [-1, u_num])), [-1, att_head, u_num])
node_type_embed = tf.matmul(attention, node_type_embed)
node_embed = node_embed + tf.reshape(tf.matmul(node_type_embed, trans_w), [-1, embedding_size])
elif args.aggregator == 'mean':
node_embed_tmp = tf.concat([tf.reshape(tf.slice(node_embed_neighbors, [0, i, 0, i, 0], [-1, 1, -1, 1, -1]), [1, -1, neighbor_samples, embedding_u_size]) for i in range(edge_type_count)], axis=0)
node_type_embed = tf.transpose(tf.reduce_mean(node_embed_tmp, axis=2), perm=[1,0,2])
trans_w = tf.nn.embedding_lookup(trans_weights, train_types)
trans_w_s1 = tf.nn.embedding_lookup(trans_weights_s1, train_types)
trans_w_s2 = tf.nn.embedding_lookup(trans_weights_s2, train_types)
attention = tf.reshape(tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh(tf.matmul(node_type_embed, trans_w_s1)), trans_w_s2), [-1, u_num])), [-1, att_head, u_num])
node_type_embed = tf.matmul(attention, node_type_embed)
node_embed = node_embed + tf.reshape(tf.matmul(node_type_embed, trans_w), [-1, embedding_size])
elif args.aggregator == 'LSTM':
node_embed_tmp = tf.concat([tf.reshape(tf.slice(node_embed_neighbors, [0, i, 0, i, 0], [-1, 1, -1, 1, -1]), [1, -1, neighbor_samples, embedding_u_size]) for i in range(edge_type_count)], axis=0)
shape = [node_embed_tmp.shape[k] for k in range(4)]
Y=tf.reshape(node_embed_tmp, [-1, shape[2], shape[3]])
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(embedding_u_size)
node_type_emb, _ = tf.nn.dynamic_rnn(lstm_cell, Y, dtype=tf.float32)
node_type_emb = tf.transpose(node_type_emb, [1, 0, 2])
last = tf.gather(node_type_emb, int(node_type_emb.get_shape()[0]) - 1)
last=tf.reshape(last, [-1, edge_type_count, embedding_u_size])
trans_w = tf.nn.embedding_lookup(trans_weights, train_types)
trans_w_s1 = tf.nn.embedding_lookup(trans_weights_s1, train_types)
trans_w_s2 = tf.nn.embedding_lookup(trans_weights_s2, train_types)
attention = tf.reshape(tf.nn.softmax(tf.reshape(tf.matmul(tf.tanh(tf.matmul(last, trans_w_s1)), trans_w_s2), [-1, u_num])), [-1, att_head, u_num])
last = tf.matmul(attention, last)
node_embed = node_embed + tf.reshape(tf.matmul(last, trans_w), [-1, embedding_size])
last_node_embed = tf.nn.l2_normalize(node_embed, axis=1)
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=last_node_embed,
num_sampled=num_sampled,
num_classes=num_nodes))
plot_loss = tf.summary.scalar("loss", loss)
# Optimizer.
optimizer = tf.train.AdamOptimizer().minimize(loss, global_step=global_step)
# Add ops to save and restore all the variables.
# saver = tf.train.Saver(max_to_keep=20)
merged = tf.summary.merge_all(key=tf.GraphKeys.SUMMARIES)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
print("Optimizing")
with tf.Session(graph=graph) as sess:
writer = tf.summary.FileWriter("./runs/" + log_name, sess.graph) # tensorboard --logdir=./runs
sess.run(init)
print('Training')
g_iter = 0
best_score = 0
patience = 0
for epoch in range(epochs):
random.shuffle(train_pairs)
batches = get_batches(train_pairs, neighbors, batch_size)
data_iter = tqdm.tqdm(batches,
desc="epoch %d" % (epoch),
total=(len(train_pairs) + (batch_size - 1)) // batch_size,
bar_format="{l_bar}{r_bar}")
avg_loss = 0.0
for i, data in enumerate(data_iter):
feed_dict = {train_inputs: data[0], train_labels: data[1], train_types: data[2], node_neigh: data[3]}
_, loss_value, summary_str = sess.run([optimizer, loss, merged], feed_dict)
writer.add_summary(summary_str, g_iter)
g_iter += 1
avg_loss += loss_value
if i % 5000 == 0:
post_fix = {
"epoch": epoch,
"iter": i,
"avg_loss": avg_loss / (i + 1),
"loss": loss_value
}
data_iter.write(str(post_fix))
final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
for i in range(edge_type_count):
for j in range(num_nodes):
final_model[edge_types[i]][index2word[j]] = np.array(sess.run(last_node_embed, {train_inputs: [j], train_types: [i], node_neigh: [neighbors[j]]})[0])
valid_aucs, valid_f1s, valid_prs = [], [], []
test_aucs, test_f1s, test_prs = [], [], []
for i in range(edge_type_count):
if args.eval_type == 'all' or edge_types[i] in args.eval_type.split(','):
tmp_auc, tmp_f1, tmp_pr = evaluate(final_model[edge_types[i]], valid_true_data_by_edge[edge_types[i]], valid_false_data_by_edge[edge_types[i]])
valid_aucs.append(tmp_auc)
valid_f1s.append(tmp_f1)
valid_prs.append(tmp_pr)
tmp_auc, tmp_f1, tmp_pr = evaluate(final_model[edge_types[i]], testing_true_data_by_edge[edge_types[i]], testing_false_data_by_edge[edge_types[i]])
test_aucs.append(tmp_auc)
test_f1s.append(tmp_f1)
test_prs.append(tmp_pr)
print('valid auc:', np.mean(valid_aucs))
print('valid pr:', np.mean(valid_prs))
print('valid f1:', np.mean(valid_f1s))
average_auc = np.mean(test_aucs)
average_f1 = np.mean(test_f1s)
average_pr = np.mean(test_prs)
            cur_score = np.mean(valid_aucs)
import suspect
import numpy
def test_null_transform():
fid = numpy.ones(128, 'complex')
data = suspect.MRSData(fid, 1.0 / 128, 123)
transformed_data = suspect.processing.frequency_correction.transform_fid(data, 0, 0)
assert type(transformed_data) == suspect.MRSData
def test_water_peak_alignment_misshape():
spectrum = numpy.zeros(128, 'complex')
spectrum[0] = 1
fids = suspect.MRSData(numpy.zeros((16, 128), 'complex'), 1.0 / 128, 123)
for i in range(fids.shape[0]):
rolled_spectrum = numpy.roll(spectrum, i)
fids[i] = numpy.fft.ifft(rolled_spectrum)
current_fid = numpy.reshape(fids[i], (1, 128))
frequency_shift = suspect.processing.frequency_correction.residual_water_alignment(current_fid)
numpy.testing.assert_almost_equal(frequency_shift, i)
def test_water_peak_alignment():
spectrum = numpy.zeros(128, 'complex')
spectrum[0] = 1
fids = suspect.MRSData(numpy.zeros((16, 128), 'complex'), 1.0 / 128, 123)
for i in range(fids.shape[0]):
        rolled_spectrum = numpy.roll(spectrum, i)
import numpy
import scipy.stats
import math
def one_hot(array, N):
"""
Convert an array of numbers to an array of one-hot vectors.
:param array: classes to convert
:type array: numpy.ndarray
:param N: number of classes
:type N: int
:return: one-hot vectors
:rtype: numpy.ndarray
"""
array = array.astype(int)
assert numpy.max(array) < N
assert numpy.min(array) >= 0
one_hot = numpy.zeros((array.shape[0], N))
one_hot[numpy.arange(array.shape[0]), array] = 1
return one_hot
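# Illustrative check (not part of the original module):
assert numpy.array_equal(one_hot(numpy.array([0, 2]), 3),
                         numpy.array([[1., 0., 0.], [0., 0., 1.]]))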
def expand_as(array, array_as):
"""
    Expands the array using reshape to allow broadcasting.
    :param array: input array
    :type array: numpy.ndarray
    :param array_as: reference tensor or array
    :type array_as: numpy.ndarray or torch.Tensor
    :return: array expanded with singleton dimensions as array_as
    :rtype: numpy.ndarray
"""
shape = list(array.shape)
for i in range(len(array.shape), len(array_as.shape)):
shape.append(1)
return array.reshape(shape)
def concatenate(array1, array2, axis=0):
"""
    Basically a wrapper for numpy.concatenate, with the exception
    that array2 is returned unchanged if array1 is None.
:param array1: input array or None
:type array1: mixed
:param array2: input array
:type array2: numpy.ndarray
:param axis: axis to concatenate
:type axis: int
:return: concatenated array
:rtype: numpy.ndarray
"""
assert isinstance(array2, numpy.ndarray)
if array1 is not None:
assert isinstance(array1, numpy.ndarray)
return numpy.concatenate((array1, array2), axis=axis)
else:
return array2
def exponential_norm(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly by norm and direction separately.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
truncated_normal = scipy.stats.truncexpon.rvs(1, loc=0, scale=0.9, size=(batch_size, 1))
random *= numpy.repeat(truncated_normal, axis=1, repeats=dim)
return random
def uniform_norm(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly by norm and direction separately.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
uniform = numpy.random.uniform(0, 1, (batch_size, 1)) # exponent is only difference!
random *= numpy.repeat(uniform, axis=1, repeats=dim)
return random
def uniform_ball(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly in the n-ball.
See Harman et al., On decompositional algorithms for uniform sampling from n-spheres and n-balls.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
uniform = numpy.random.uniform(0, 1, (batch_size, 1)) ** (1. / dim)
random *= numpy.repeat(uniform, axis=1, repeats=dim)
return random
def uniform_sphere(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly on the n-sphere.
See Harman et al., On decompositional algorithms for uniform sampling from n-spheres and n-balls.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
return random
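# Illustrative check (not part of the original module): uniform_ball samples
# stay inside the epsilon-ball, while uniform_sphere samples lie on its surface.
_demo_ball = uniform_ball(100, 3, epsilon=0.5)
assert numpy.all(numpy.linalg.norm(_demo_ball, axis=1) <= 0.5 + 1e-8)
_demo_sphere = uniform_sphere(100, 3, epsilon=0.5)
assert numpy.allclose(numpy.linalg.norm(_demo_sphere, axis=1), 0.5)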
def truncated_normal(size, lower=-2, upper=2):
"""
Sample from truncated normal.
See https://stackoverflow.com/questions/18441779/how-to-specify-upper-and-lower-limits-when-using-numpy-random-normal.
:param size: size of vector
:type size: [int]
:param lower: lower bound
:type lower: float
:param upper: upper bound
:type upper: float
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
return scipy.stats.truncnorm.rvs(lower, upper, size=size)
def project_simplex(v, s=1):
"""
Taken from https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246.
Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: int, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
[1] Efficient Projections onto the .1-Ball for Learning in High Dimensions
<NAME>, <NAME>, <NAME>, and <NAME>.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
n, = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
    if v.sum() == s and numpy.alltrue(v >= 0):
        # v is already on the simplex: its projection is itself
        return v
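    # The remainder of this function is missing from this dump.  A standard
    # completion of the cited algorithm [1], given here only as a sketch:
    #   u = numpy.sort(v)[::-1]
    #   cssv = numpy.cumsum(u)
    #   rho = numpy.nonzero(u * numpy.arange(1, n + 1) > (cssv - s))[0][-1]
    #   theta = (cssv[rho] - s) / (rho + 1.0)
    #   return numpy.maximum(v - theta, 0)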
import torch, math, numpy as np, scipy.sparse as sp
import torch.nn as nn, torch.nn.functional as F, torch.nn.init as init
from torch.autograd import Variable
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class HyperGraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, a, b, reapproximate=True, cuda=True):
super(HyperGraphConvolution, self).__init__()
self.a, self.b = a, b
self.reapproximate, self.cuda = reapproximate, cuda
self.W = Parameter(torch.FloatTensor(a, b))
self.bias = Parameter(torch.FloatTensor(b))
self.reset_parameters()
def reset_parameters(self):
std = 1. / math.sqrt(self.W.size(1))
self.W.data.uniform_(-std, std)
self.bias.data.uniform_(-std, std)
def forward(self, structure, H, m=True):
W, b = self.W, self.bias
HW = torch.mm(H, W)
if self.reapproximate:
n, X = H.shape[0], HW.cpu().detach().numpy()
A = Laplacian(n, structure, X, m)
else: A = structure
if self.cuda: A = A.cuda()
A = Variable(A)
AHW = SparseMM.apply(A, HW)
return AHW + b
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.a) + ' -> ' \
+ str(self.b) + ')'
class SparseMM(torch.autograd.Function):
"""
Sparse x dense matrix multiplication with autograd support.
Implementation by <NAME>:
https://discuss.pytorch.org/t/
does-pytorch-support-autograd-on-sparse-matrix/6156/7
"""
@staticmethod
def forward(ctx, M1, M2):
ctx.save_for_backward(M1, M2)
return torch.mm(M1, M2)
@staticmethod
def backward(ctx, g):
M1, M2 = ctx.saved_tensors
g1 = g2 = None
if ctx.needs_input_grad[0]:
g1 = torch.mm(g, M2.t())
if ctx.needs_input_grad[1]:
g2 = torch.mm(M1.t(), g)
return g1, g2
def Laplacian(V, E, X, m):
"""
    approximates the hypergraph Laplacian defined by the hyperedges E, with or without mediators
arguments:
V: number of vertices
E: dictionary of hyperedges (key: hyperedge, value: list/set of hypernodes)
X: features on the vertices
m: True gives Laplacian with mediators, while False gives without
A: adjacency matrix of the graph approximation
returns:
updated data with 'graph' as a key and its value the approximated hypergraph
"""
edges, weights = [], {}
    rv = np.random.rand(X.shape[1])
'''
HMMUtil.py
Provides standard message-passing algorithms for inference in HMMs,
such as the forward-backward algorithm
Intentionally separated from rest of HMM code, so that we can swap in
any fast routine for this calculation with ease.
'''
import numpy as np
from bnpy.util import EPS
from bnpy.util import digamma, gammaln
from bnpy.util.NumericUtil import Config as PlatformConfig
from bnpy.util.NumericUtil import sumRtimesS
from bnpy.util.NumericUtil import inplaceLog
from bnpy.util import as2D
from bnpy.allocmodel.hmm.lib.LibFwdBwd import cppReady, FwdAlg_cpp, BwdAlg_cpp, SummaryAlg_cpp
def calcLocalParams(Data, LP,
transTheta=None, startTheta=None,
limitMemoryLP=1,
hmm_feature_method_LP='forward+backward',
mPairIDs=None,
cslice=(0, None),
**kwargs):
''' Compute local parameters for provided dataset.
Returns
-------
LP : dict of local params, with fields
* resp : 2D array, nAtom x K
if limitMemoryLP=0:
* respPair : 3D array, nAtom x K x K
if limitMemoryLP=1:
* TransCount : 3D array, nSeq x K x K
'''
# Unpack soft evidence 2D array
logLik = LP['E_log_soft_ev']
nAtom, K = logLik.shape
# Calculate trans prob 2D array
digammaSumTransTheta = digamma(np.sum(transTheta[:K, :K + 1], axis=1))
transPi = digamma(transTheta[:K, :K]) - digammaSumTransTheta[:, np.newaxis]
np.exp(transPi, out=transPi)
# Calculate LOG of start state prob vector
logstartPi = digamma(startTheta[:K]) - digamma(np.sum(startTheta[:K + 1]))
# Set starting probs to uniform,
# because Line A below updates first state's logLik to include logstartPi
startPi = np.ones(K)
logMargPr = np.empty(Data.nDoc)
resp = np.empty((nAtom, K))
# Unpack pairs to track for merging.
if mPairIDs is None:
mPairIDs = np.zeros((0, 2))
M = 0
else:
if len(mPairIDs) == 0:
mPairIDs = np.zeros((0, 2))
M = 0
else:
mPairIDs = as2D(mPairIDs)
M = mPairIDs.shape[0]
assert mPairIDs.shape[1] == 2
if hmm_feature_method_LP == 'forward':
fmsg = np.zeros_like(LP['E_log_soft_ev'])
# Run forward backward algorithm on each sequence n
for n in range(Data.nDoc):
start = Data.doc_range[n]
stop = Data.doc_range[n + 1]
logLik_n = logLik[start:stop]
# Adding in start state probs, in log space for stability.
logLik_n[0] += logstartPi
PiInit, PiMat, K = _parseInput_TransParams(startPi, transPi)
logSoftEv = _parseInput_SoftEv(logLik_n, K)
T = logSoftEv.shape[0]
SoftEv, lognormC = expLogLik(logSoftEv)
fmsg_n, margPrObs = FwdAlg(PiInit, PiMat, SoftEv)
if not np.all(np.isfinite(margPrObs)):
raise ValueError('NaN values found. Numerical badness!')
fmsg[start:stop] = fmsg_n
LP['fmsg'] = fmsg
elif limitMemoryLP:
# Track sufficient statistics directly at each sequence.
TransCount = np.empty((Data.nDoc, K, K))
Htable = np.empty((Data.nDoc, K, K))
mHtable = np.zeros((2 * M, K))
# Run forward backward algorithm on each sequence n
for n in range(Data.nDoc):
start = Data.doc_range[n]
stop = Data.doc_range[n + 1]
logLik_n = logLik[start:stop]
# Adding in start state probs, in log space for stability.
logLik_n[0] += logstartPi # Line A
# Run fwd-fwd alg and record result.
resp_n, lp_n, TransCount_n, Htable_n, mHtable_n = \
FwdBwdAlg_LimitMemory(startPi, transPi, logLik_n, mPairIDs)
resp[start:stop] = resp_n
logMargPr[n] = lp_n
TransCount[n] = TransCount_n
Htable[n] = Htable_n
mHtable += mHtable_n
LP['resp'] = resp
LP['evidence'] = np.sum(logMargPr)
LP['TransCount'] = TransCount
LP['Htable'] = Htable
LP['mHtable'] = mHtable
else:
# Track pair-wise assignment probs for each sequence
respPair = np.empty((nAtom, K, K))
# Run the forward backward algorithm on each sequence
for n in range(Data.nDoc):
start = Data.doc_range[n]
stop = Data.doc_range[n + 1]
logLik_n = logLik[start:stop]
# Adding in start state probs, in log space for stability.
logLik_n[0] += logstartPi # Line A
resp_n, respPair_n, lp_n = \
FwdBwdAlg(startPi, transPi, logLik_n)
resp[start:stop] = resp_n
respPair[start:stop] = respPair_n
logMargPr[n] = lp_n
LP['evidence'] = np.sum(logMargPr)
LP['resp'] = resp
LP['respPair'] = respPair
# ... end if statement on limitMemoryLP
return LP
def FwdBwdAlg(PiInit, PiMat, logSoftEv):
'''Execute forward-backward algorithm for one sequence.
Args
-------
piInit : 1D array, size K
initial transition distribution to each of the K states
must be valid probability vector (positive entries, sums to one)
piMat : 2D array, size KxK
piMat[j] is transition distribution from state j to all K states.
each row must be probability vector (positive entries, sums to one)
logSoftEv : 2D array, size TxK
logSoftEv[t] := log p( x[t] | z[tk] = 1)
log likelihood of observation t under state k
if given exactly,
* resp, respPair will be exact
* logMargPrSeq will be exact
if given up to an additive constant,
* resp, respPair will be exact
* logMargPrSeq will be off by an additive constant
Returns
-------
resp : 2D array, size T x K
resp[t,k] = marg. prob. that step t assigned to state K
p( z[t,k] = 1 | x[1], x[2], ... x[T])
respPair : 3D array, size T x K x K
respPair[t,j,k] = prob. of the joint event that
* step t-1 assigned to state j
* step t assigned to state k
Formally = p( z[t-1,j] = 1, z[t,k] = 1 | x[1], x[2], ... x[T])
        respPair[0,:,:] is undefined, but kept so indexing stays consistent.
logMargPrSeq : scalar real
logMargPrSeq = joint log probability of the observed sequence
log p( x[1], x[2], ... x[T] )
'''
PiInit, PiMat, K = _parseInput_TransParams(PiInit, PiMat)
logSoftEv = _parseInput_SoftEv(logSoftEv, K)
T = logSoftEv.shape[0]
SoftEv, lognormC = expLogLik(logSoftEv)
fmsg, margPrObs = FwdAlg(PiInit, PiMat, SoftEv)
if not np.all(np.isfinite(margPrObs)):
raise ValueError('NaN values found. Numerical badness!')
bmsg = BwdAlg(PiInit, PiMat, SoftEv, margPrObs)
resp = fmsg * bmsg
respPair = calcRespPair_fast(PiMat, SoftEv, margPrObs, fmsg, bmsg, K, T)
logMargPrSeq = np.log(margPrObs).sum() + lognormC.sum()
return resp, respPair, logMargPrSeq
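# Illustrative usage sketch (not part of bnpy): forward-backward on a toy
# 2-state chain with 3 timesteps.  Each row of resp sums to one, and
# respPair[0] is kept only so that indexing stays aligned with resp.
#
#     PiInit = np.asarray([0.6, 0.4])
#     PiMat = np.asarray([[0.9, 0.1], [0.2, 0.8]])
#     logSoftEv = np.log(np.asarray([[0.7, 0.3], [0.4, 0.6], [0.1, 0.9]]))
#     resp, respPair, logPr = FwdBwdAlg(PiInit, PiMat, logSoftEv)
#     assert np.allclose(resp.sum(axis=1), 1.0)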
def FwdBwdAlg_LimitMemory(PiInit, PiMat, logSoftEv, mPairIDs):
'''Execute forward-backward algorithm using only O(K) memory.
Args
-------
piInit : 1D array, size K
piMat : 2D array, size KxK
logSoftEv : 2D array, size TxK
Returns
-------
resp : 2D array, size T x K
resp[t,k] = marg. prob. that step t assigned to state K
p( z[t,k] = 1 | x[1], x[2], ... x[T])
TransCount
Htable
logMargPrSeq : scalar real
logMargPrSeq = joint log probability of the observed sequence
log p( x[1], x[2], ... x[T] )
'''
PiInit, PiMat, K = _parseInput_TransParams(PiInit, PiMat)
logSoftEv = _parseInput_SoftEv(logSoftEv, K)
SoftEv, lognormC = expLogLik(logSoftEv)
T = logSoftEv.shape[0]
fmsg, margPrObs = FwdAlg(PiInit, PiMat, SoftEv)
if not np.all(np.isfinite(margPrObs)):
raise ValueError('NaN values found. Numerical badness!')
bmsg = BwdAlg(PiInit, PiMat, SoftEv, margPrObs)
resp = fmsg * bmsg
logMargPrSeq = np.log(margPrObs).sum() + lognormC.sum()
TransStateCount, Htable, mHtable = SummaryAlg(
PiInit, PiMat, SoftEv, margPrObs, fmsg, bmsg, mPairIDs)
return resp, logMargPrSeq, TransStateCount, Htable, mHtable
def calcRespPair_forloop(PiMat, SoftEv, margPrObs, fmsg, bmsg, K, T):
''' Calculate pair-wise responsibilities for all adjacent timesteps
Uses a simple, for-loop implementation.
    See calcRespPair_fast for an equivalent function that is much faster.
Returns
---------
respPair : 3D array, size T x K x K
respPair[t,j,k] = prob. of the joint event that
* step t-1 assigned to state j
* step t assigned to state k
Formally = p( z[t-1,j] = 1, z[t,k] = 1 | x[1], x[2], ... x[T])
respPair[0,:,:] is undefined, but kept so indexing consistent.
'''
respPair = np.zeros((T, K, K))
for t in range(1, T):
respPair[t] = np.outer(fmsg[t - 1], bmsg[t] * SoftEv[t])
respPair[t] *= PiMat / margPrObs[t]
return respPair
def calcRespPair_fast(PiMat, SoftEv, margPrObs, fmsg, bmsg, K, T,
doCopy=0):
''' Calculate pair-wise responsibilities for all adjacent timesteps
Uses a fast, vectorized algorithm.
Returns
---------
respPair : 3D array, size T x K x K
respPair[t,j,k] = prob. of the joint event that
* step t-1 assigned to state j
* step t assigned to state k
Formally = p( z[t-1,j] = 1, z[t,k] = 1 | x[1], x[2], ... x[T])
respPair[0,:,:] is undefined, but kept so indexing consistent.
'''
if doCopy:
bmsgSoftEv = SoftEv * bmsg
else:
bmsgSoftEv = SoftEv # alias
        bmsgSoftEv *= bmsg  # in-place multiplication
respPair = np.zeros((T, K, K))
respPair[1:] = fmsg[:-1][:, :, np.newaxis] * \
bmsgSoftEv[1:][:, np.newaxis, :]
respPair *= PiMat[np.newaxis, :, :]
respPair /= margPrObs[:, np.newaxis, np.newaxis]
return respPair
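# Hypothetical consistency check (values assumed for illustration): the vectorized
# routine above should agree with the reference for-loop implementation, given scaled
# messages from the pure-python FwdAlg_py/BwdAlg_py defined below.
# >>> PiInit = np.array([0.5, 0.5])
# >>> PiMat = np.array([[0.9, 0.1], [0.2, 0.8]])
# >>> SoftEv = np.random.rand(6, 2)
# >>> fmsg, margPrObs = FwdAlg_py(PiInit, PiMat, SoftEv)
# >>> bmsg = BwdAlg_py(PiInit, PiMat, SoftEv, margPrObs)
# >>> slow = calcRespPair_forloop(PiMat, SoftEv, margPrObs, fmsg, bmsg, 2, 6)
# >>> fast = calcRespPair_fast(PiMat, SoftEv, margPrObs, fmsg, bmsg, 2, 6, doCopy=1)
# >>> np.allclose(slow, fast)
# True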
def FwdAlg(PiInit, PiMat, SoftEv):
''' Forward algorithm for a single HMM sequence. Wrapper for py/cpp.
Related
-------
FwdAlg_py
Returns
-------
fmsg : 2D array, size T x K
fmsg[t,k] = p( z[t,k] = 1 | x[1] ... x[t] )
margPrObs : 1D array, size T
margPrObs[t] = p( x[t] | x[1], x[2], ... x[t-1] )
'''
if cppReady() and PlatformConfig['FwdBwdImpl'] == "cpp":
return FwdAlg_cpp(PiInit, PiMat, SoftEv)
else:
return FwdAlg_py(PiInit, PiMat, SoftEv)
def BwdAlg(PiInit, PiMat, SoftEv, margPrObs):
''' Backward algorithm for a single HMM sequence.
Wrapper for BwdAlg_py/BwdAlg_cpp.
Related
-------
BwdAlg_py
Returns
-------
bmsg : 2D array, size TxK
bmsg[t,k] = p( x[t+1], x[t+2], ... x[T] | z[t,k] = 1 )
-------------------------------------
p( x[t+1], x[t+2], ... x[T] | x[1] ... x[t])
'''
if cppReady() and PlatformConfig['FwdBwdImpl'] == "cpp":
return BwdAlg_cpp(PiInit, PiMat, SoftEv, margPrObs)
else:
return BwdAlg_py(PiInit, PiMat, SoftEv, margPrObs)
def FwdAlg_py(PiInit, PiMat, SoftEv):
''' Forward algorithm for a single HMM sequence. In pure python.
Execute forward message-passing on an observed sequence
given HMM state transition params and likelihoods of each observation
Args
-------
piInit : 1D array, size K
initial transition distribution to each of the K states
must be valid probability vector (positive entries, sums to one)
piMat : 2D array, size KxK
piMat[j] is transition distribution from state j to all K states.
each row must be probability vector (positive entries, sums to one)
SoftEv : 2D array, size TxK
SoftEv[t] := p( x[t] | z[tk] = 1)
likelihood of observation t under state k
given up to an additive constant for each t
Returns
-------
fmsg : 2D array, size T x K
fmsg[t,k] = p( z[t,k] = 1 | x[1] ... x[t] )
margPrObs : 1D array, size T
margPrObs[t] = p( x[t] | x[1], x[2], ... x[t-1] )
'''
T = SoftEv.shape[0]
K = PiInit.size
PiTMat = PiMat.T
fmsg = np.empty((T, K))
margPrObs = np.zeros(T)
for t in range(0, T):
if t == 0:
fmsg[t] = PiInit * SoftEv[0]
else:
fmsg[t] = np.dot(PiTMat, fmsg[t - 1]) * SoftEv[t]
margPrObs[t] = np.sum(fmsg[t])
fmsg[t] /= margPrObs[t]
return fmsg, margPrObs
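# Illustrative note (toy values assumed): with the per-step normalization above,
# margPrObs[t] is the one-step predictive likelihood, so a degenerate HMM that starts
# (and stays) in state 0 keeps all probability mass there and has unit predictive mass.
# >>> fmsg, margPrObs = FwdAlg_py(np.array([1.0, 0.0]),
# ...                             np.eye(2),
# ...                             np.ones((4, 2)))
# >>> fmsg[-1]
# array([1., 0.])
# >>> margPrObs
# array([1., 1., 1., 1.])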
def BwdAlg_py(PiInit, PiMat, SoftEv, margPrObs):
'''Backward algorithm for a single HMM sequence. In pure python.
Takes as input the HMM state transition params,
initial probabilities, and likelihoods of each observation.
Requires running forward filtering first, to obtain correct scaling.
Args
-------
piInit : 1D array, size K
initial transition distribution to each of the K states
must be valid probability vector (positive entries, sums to one)
piMat : 2D array, size KxK
piMat[j] is transition distribution from state j to all K states.
each row must be probability vector (positive entries, sums to one)
SoftEv : 2D array, size TxK
SoftEv[t] := p( x[t] | z[tk] = 1)
likelihood of observation t under state k
given up to an additive constant for each t
margPrObs : 1D array, size T
margPrObs[t] := p( x[t] | x[1], x[2], ... x[t-1] )
this is returned by FwdAlg
Returns
-------
bmsg : 2D array, size TxK
bmsg[t,k] = p( x[t+1], x[t+2], ... x[T] | z[t,k] = 1 )
-------------------------------------
p( x[t+1], x[t+2], ... x[T] | x[1] ... x[t])
'''
T = SoftEv.shape[0]
K = PiInit.size
bmsg = np.ones((T, K))
for t in range(T - 2, -1, -1):
bmsg[t] = np.dot(PiMat, bmsg[t + 1] * SoftEv[t + 1])
bmsg[t] /= margPrObs[t + 1]
return bmsg
def SummaryAlg(*args):
''' Summarize pairwise potentials of single HMM sequence.
Related
-------
SummaryAlg_py
Returns
-------
TransStateCount
Htable
'''
if cppReady() and PlatformConfig['FwdBwdImpl'] == "cpp":
return SummaryAlg_cpp(*args)
else:
return SummaryAlg_py(*args)
def SummaryAlg_py(PiInit, PiMat, SoftEv, margPrObs, fMsg, bMsg,
mPairIDs=None):
K = PiInit.size
T = SoftEv.shape[0]
if mPairIDs is None:
M = 0
else:
if len(mPairIDs) == 0:
M = 0
else:
mPairIDs = as2D(np.asarray(mPairIDs, dtype=np.int32))
assert mPairIDs.ndim == 2
assert mPairIDs.shape[1] == 2
assert mPairIDs.shape[0] > 0
M = mPairIDs.shape[0]
mHtable = np.zeros((2 * M, K))
respPair_t = np.zeros((K, K))
Htable = np.zeros((K, K))
TransStateCount = np.zeros((K, K))
for t in range(1, T):
respPair_t = np.outer(fMsg[t - 1], bMsg[t] * SoftEv[t])
respPair_t *= PiMat / margPrObs[t]
TransStateCount += respPair_t
respPair_t += 1e-100
rowwiseSum = np.sum(respPair_t, axis=1)
Htable += respPair_t * np.log(respPair_t) \
- respPair_t * np.log(rowwiseSum)[:, np.newaxis]
if M > 0:
respPair = calcRespPair_fast(PiMat, SoftEv,
margPrObs, fMsg, bMsg,
K, T, doCopy=1)
for m in range(M):
kA = mPairIDs[m, 0]
kB = mPairIDs[m, 1]
mHtable[
2 *
m:2 *
m +
2] = calc_sub_Htable_forMergePair(
respPair,
kA,
kB)
Htable *= -1
return TransStateCount, Htable, mHtable
def expLogLik(logSoftEv, axis=1):
''' Return element-wise exp of input log likelihood
Numerically safe, guaranteed not to underflow
Returns
--------
SoftEv : 2D array, size TxK
equal to exp(logSoftEv), up to prop constant for each row
lognormC : 1D array, size T
gives log of the prop constant for each row
'''
lognormC = np.max(logSoftEv, axis)
if axis == 0:
logSoftEv = logSoftEv - lognormC[np.newaxis, :]
elif axis == 1:
logSoftEv = logSoftEv - lognormC[:, np.newaxis]
SoftEv = np.exp(logSoftEv)
return SoftEv, lognormC
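# Illustrative sketch (assumed values): this is the usual exp-normalize trick, so rows
# with very negative log-likelihoods do not underflow to all zeros.
# >>> SoftEv, lognormC = expLogLik(np.array([[-1000.0, -1001.0]]))
# >>> SoftEv
# array([[1.        , 0.36787944]])
# >>> lognormC
# array([-1000.])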
def _parseInput_TransParams(PiInit, PiMat):
PiInit = np.asarray(PiInit, dtype=np.float64)
PiMat = np.asarray(PiMat, dtype=np.float64)
assert PiInit.ndim == 1
K0 = PiInit.shape[0]
assert PiMat.ndim == 2
J, K = PiMat.shape
assert J == K
assert K0 == K
return PiInit, PiMat, K
def _parseInput_SoftEv(logSoftEv, K):
logSoftEv = np.asarray(logSoftEv, dtype=np.float64)
Tl, Kl = logSoftEv.shape
assert Kl == K
return logSoftEv
def runViterbiAlg(logSoftEv, logPi0, logPi):
''' Run viterbi algorithm to estimate MAP states for single sequence.
Args
------
logSoftEv : 2D array, T x K
log soft evidence matrix
each row t := log p( x[t] | z[t]=k )
logPi0 : 1D array, K
initial state log probability vector
sums to one after exponentiating
logPi : 2D array, K x K
j-th row is is log transition probability vector for state j
each row sums to one after exponentiating
Returns
------
zHat : 1D array, length T, representing the MAP state sequence
zHat[t] gives the integer label {1, 2, ... K} of state at timestep t
'''
    if np.any(logPi0 > 0):
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri, 25 May 2018 20:29:09
@author: luohao
"""
"""
CVPR2017 paper:<NAME>, <NAME>, <NAME>, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
Matlab version: https://github.com/zhunzhong07/person-re-ranking
"""
"""
API
probFea: all feature vectors of the query set (torch tensor)
galFea: all feature vectors of the gallery set (torch tensor)
k1,k2,lambda: parameters, the original paper is (k1=20,k2=6,lambda=0.3)
MemorySave: set to 'True' when using MemorySave mode
Minibatch: available when 'MemorySave' is 'True'
"""
import numpy as np
import torch
from scipy.spatial.distance import cdist
import time
import gc
from tqdm import tqdm
def euclidean_distance(qf, gf):
m = qf.shape[0]
n = gf.shape[0]
dist_mat = torch.pow(qf,2).sum(dim=1, keepdim=True).expand(m,n) +\
torch.pow(gf,2).sum(dim=1, keepdim=True).expand(n,m).t()
dist_mat.addmm_(1,-2,qf,gf.t())
# # for L2-norm feature
# dist_mat = 2 - 2 * torch.matmul(qf, gf.t())
return dist_mat
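# Hypothetical usage sketch (toy CPU tensors, values assumed): the function returns
# *squared* Euclidean distances; note the legacy addmm_(beta, alpha, ...) call above
# targets older PyTorch releases.
# >>> qf = torch.tensor([[0.0, 0.0], [1.0, 0.0]])
# >>> gf = torch.tensor([[0.0, 0.0], [0.0, 2.0]])
# >>> euclidean_distance(qf, gf)
# tensor([[0., 4.],
#         [1., 5.]])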
def batch_euclidean_distance(qf, gf, N=6000):
m = qf.shape[0]
n = gf.shape[0]
dist_mat = []
for j in range(n // N + 1):
temp_gf = gf[j * N:j * N + N]
temp_qd = []
for i in range(m // N + 1):
temp_qf = qf[i * N:i * N + N]
temp_d = euclidean_distance(temp_qf, temp_gf)
temp_qd.append(temp_d)
temp_qd = torch.cat(temp_qd, dim=0)
temp_qd = temp_qd / (torch.max(temp_qd, dim=0)[0])
dist_mat.append(temp_qd.t().cpu())
del temp_qd
del temp_gf
del temp_qf
del temp_d
torch.cuda.empty_cache() # empty GPU memory
dist_mat = torch.cat(dist_mat, dim=0)
return dist_mat
# Compute the top-K ranking on the GPU and return only the first (k1+1) results
def batch_torch_topk(qf, gf, k1, N=6000):
m = qf.shape[0]
n = gf.shape[0]
dist_mat = []
initial_rank = []
for j in range(n // N + 1):
temp_gf = gf[j * N:j * N + N]
temp_qd = []
for i in range(m // N + 1):
temp_qf = qf[i * N:i * N + N]
temp_d = euclidean_distance(temp_qf, temp_gf)
temp_qd.append(temp_d)
temp_qd = torch.cat(temp_qd, dim=0)
temp_qd = temp_qd / (torch.max(temp_qd, dim=0)[0])
temp_qd = temp_qd.t()
initial_rank.append(torch.topk(temp_qd, k=k1, dim=1, largest=False, sorted=True)[1])
del temp_qd
del temp_gf
del temp_qf
del temp_d
torch.cuda.empty_cache() # empty GPU memory
initial_rank = torch.cat(initial_rank, dim=0).cpu().numpy()
return initial_rank
def batch_v(feat, R, all_num):
V = np.zeros((all_num, all_num), dtype=np.float32)
m = feat.shape[0]
for i in tqdm(range(m)):
temp_gf = feat[i].unsqueeze(0)
# temp_qd = []
temp_qd = euclidean_distance(temp_gf, feat)
temp_qd = temp_qd / (torch.max(temp_qd))
temp_qd = temp_qd.squeeze()
temp_qd = temp_qd[R[i]]
weight = torch.exp(-temp_qd)
weight = (weight / torch.sum(weight)).cpu().numpy()
V[i, R[i]] = weight.astype(np.float32)
return V
def k_reciprocal_neigh(initial_rank, i, k1):
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
return forward_k_neigh_index[fi]
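# Illustrative sketch (assumed toy ranking): i and j are k-reciprocal neighbours when
# each appears in the other's top-(k1+1) list; here samples 0 and 1 rank each other first.
# >>> initial_rank = np.array([[0, 1, 2],
# ...                          [1, 0, 2],
# ...                          [2, 1, 0]])
# >>> k_reciprocal_neigh(initial_rank, 0, 1)
# array([0, 1])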
def re_ranking(probFea, galFea, k1, k2, lambda_value):
    # Note: the naming below (e.g. gallery_num) differs from the outer scope;
    # this is intentional and can be ignored.
t1 = time.time()
query_num = probFea.size(0)
all_num = query_num + galFea.size(0)
feat = torch.cat([probFea, galFea]).cuda()
initial_rank = batch_torch_topk(feat, feat, k1 + 1, N=6000)
# del feat
del probFea
del galFea
torch.cuda.empty_cache() # empty GPU memory
gc.collect() # empty memory
# print('Using totally {:.2f}s to compute initial_rank'.format(time.time() - t1))
print('starting re_ranking')
R = []
for i in tqdm(range(all_num)):
# k-reciprocal neighbors
k_reciprocal_index = k_reciprocal_neigh(initial_rank, i, k1)
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_k_reciprocal_index = k_reciprocal_neigh(initial_rank, candidate, int(np.around(k1 / 2)))
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
R.append(k_reciprocal_expansion_index)
gc.collect() # empty memory
    print('Using {:.2f}s in total to compute R'.format(time.time() - t1))
V = batch_v(feat, R, all_num)
del R
gc.collect() # empty memory
    print('Using {:.2f}s in total to compute V-1'.format(time.time() - t1))
initial_rank = initial_rank[:, :k2]
    ### Faster version
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float16)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i], :], axis=0)
V = V_qe
del V_qe
del initial_rank
    ### Lower-memory version (about 40% less memory, but slower)
'''gc.collect() # empty memory
N = 2000
for j in range(all_num // N + 1):
if k2 != 1:
V_qe = np.zeros_like(V[:, j * N:j * N + N], dtype=np.float32)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i], j * N:j * N + N], axis=0)
V[:, j * N:j * N + N] = V_qe
del V_qe
del initial_rank'''
gc.collect() # empty memory
    print('Using {:.2f}s in total to compute V-2'.format(time.time() - t1))
invIndex = []
for i in range(all_num):
invIndex.append(np.where(V[:, i] != 0)[0])
    print('Using {:.2f}s in total to compute invIndex'.format(time.time() - t1))
jaccard_dist = np.zeros((query_num, all_num), dtype=np.float32)
for i in tqdm(range(query_num)):
        temp_min = np.zeros(shape=[1, all_num], dtype=np.float32)
#!/usr/bin/python3
from argparse import ArgumentParser
import datetime # date and time manipulations
from matplotlib import pyplot
import numpy as np
import re # regular expression operations
import requests # HTTP requests
from termcolor import colored # colored terminal output
def get_cookie_crumb(url='https://finance.yahoo.com/quote/SPY/history'):
'''
Return site cookie and crumb needed to access stock data.
Args:
url: site providing historical stock data (optional)
'''
res = requests.get(url)
cookie = res.cookies['B']
crumb = None
    pattern = re.compile(r'.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}')
for line in res.text.splitlines():
m = pattern.match(line)
if m is not None:
crumb = m.groupdict()['crumb']
return cookie, crumb
def get_stock_data(stock, days, cookie, crumb,
url='https://query1.finance.yahoo.com/v7/finance/download/'):
'''
Return historic stock prices covering N days in the past from today.
Args:
stock: stock symbol
days: number of days in the past from today
cookie: site cookie needed to access stock data
crumb: site crumb needed to access stock data
url: site providing historical stock data (optional)
'''
time_past = (datetime.date.today() - datetime.timedelta(days=days)).strftime('%s')
time_curr = datetime.date.today().strftime('%s')
interval = '1d' if days < 367 else '1wk'
params = {'symbol': stock,
'period1': time_past,
'period2': time_curr,
'interval': interval,
'crumb': crumb}
res = requests.get(url, params=params, cookies={'B' : cookie})
return res.text
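# Hypothetical usage sketch: the cookie/crumb pair from get_cookie_crumb() authorizes
# the CSV download. The Yahoo Finance endpoint and response format reflect the API at
# the time of writing and may have changed since.
# >>> cookie, crumb = get_cookie_crumb()
# >>> csv_text = get_stock_data('SPY', 365, cookie, crumb)
# >>> csv_text.splitlines()[0]
# 'Date,Open,High,Low,Close,Adj Close,Volume'   (header format at the time of writing)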
class StockAnalizer:
'''
Provides several tools to analyze the historic prices of a given stock. On top of the stock
price, several EMAs (Exponential Moving Averages) can also be displayed. The stock trend can be
added as an overlay display.
'''
def __init__(self):
self.date = []
self.price = []
self.ema = {}
self.trend = []
def process_stock_data(self, history):
'''
Process historic stock prices and store them internally for future manipulations.
Args:
history: historic stock prices in a text format
'''
date_idx = None
price_idx = None
process_header = True
for line in history.splitlines():
data = line.split(',')
if process_header:
try:
date_idx = data.index('Date')
except:
print('\ndate information not available\n')
return
try:
price_idx = data.index('Close')
except:
print('\nprice information not available\n')
return
process_header = False
continue
if data[price_idx] == 'null':
continue
dt = data[date_idx].split('-')
self.date.append(dt[1] + '/' + dt[2] + '/' + dt[0][2:]) # store date as MM/DD/YY
self.price.append(float(data[price_idx]))
def compute_ema_data(self, days):
'''
Compute and store internally EMAs based on given number of days.
Args:
days: number of days to use when averaging
'''
# return if no stock data
if len(self.date) == 0:
print('\ncompute_ema_data: skipping...no data available\n')
return
# compute EMAs
self.ema[days] = [self.price[0]]
w = 2.0 / (days + 1) # stock price weight
for i in range(1, len(self.price)):
ema = self.price[i] * w + self.ema[days][-1] * (1 - w)
self.ema[days].append(ema)
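    # Worked example of the EMA recurrence above (prices assumed for illustration):
    # with days=3 the weight is w = 2/(3+1) = 0.5, so prices [10, 12, 11] give
    # EMAs [10.0, 12*0.5 + 10*0.5, 11*0.5 + 11*0.5] = [10.0, 11.0, 11.0].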
def compute_trend_data(self, ema_key, stride):
'''
Compute and store internally the trend of the stock over time. The trend is calculated
based on previously computed EMA and the given stride.
Args:
ema_key: specifies what EMA to use when computing the trend
stride: determines what reference value to use when computing the trend
'''
# return if EMA key not found
if ema_key not in self.ema.keys():
print('\ncompute_trend_data: skipping...selected EMA not available\n')
return
# compute the trend
for i in range(len(self.date)):
ref = self.ema[ema_key][max(0, i - stride)]
dif = 100.0 * (self.ema[ema_key][i] - ref) / ref # as percentage
self.trend.append(dif)
def plot_data(self, stock, period, path):
'''
Show on a plot the historic stock price, the computed EMAs and the trend if available.
Args:
stock: stock symbol
period: some text indicating the time period that is displayed
path: save plot if path is provided
'''
# return if no stock data
if len(self.date) == 0:
print('\nplot_data: skipping...no data available\n')
return
# set up the plot
fig = pyplot.figure(1, figsize=(10, 4))
fig.subplots_adjust(left=0.07, bottom=0.1, right=0.93, top=0.9)
plt_trend = fig.subplots()
plt_trend.set_ylabel('Trend (%)')
plt_trend.yaxis.label.set_color('green')
plt_trend.tick_params(axis='y', colors='green')
plt_price = plt_trend.twinx()
plt_price.set_ylabel('Price ($)')
# plot stock price
if len(self.date) != 0:
plt_price.plot(self.date, self.price, 'black', linewidth=1.5)
plt_price.yaxis.set_label_coords(-0.07, 0.5)
plt_price.yaxis.tick_left()
# plot EMAs
ema_clr = ['blue', 'red', 'purple', 'orange']
for i, key in enumerate(self.ema.keys()):
plt_price.plot(self.date, self.ema[key], ema_clr[i % len(ema_clr)], linewidth=1,
label='EMA '+str(key))
# plot trend
if len(self.trend) != 0:
plt_trend.plot(self.date, self.trend, 'green', linewidth=1, linestyle='dotted')
plt_trend.axhline(y=0.0, color='grey', linewidth=1, linestyle='dotted')
plt_trend.yaxis.set_label_coords(1.07, 0.5)
plt_trend.yaxis.tick_right()
# set up labels on x-axis
nx = 8 # number of labels on x-axis
pyplot.xticks([int(1.0 * i * (len(self.date) - 1) / (nx - 1)) for i in range(nx)])
# set up title
price_now = self.price[-1]
price_old = self.price[0]
pyplot.suptitle(
stock.upper() +
' • ${:.2f}'.format(price_now) +
' • {:.2f}% ('.format(100.0 * (price_now - price_old) / price_old) +
period.upper() + ')'
)
plt_price.legend(frameon=False)
if path is not None:
pyplot.savefig(path + stock.lower() + '_' + period.lower(), dpi=100)
pyplot.show()
def main():
# set up command-line options
parser = ArgumentParser(description='Display trends of the selected stock(s).', add_help=True)
parser.add_argument('positional', metavar='stock', nargs='+', help='stock symbol(s)')
group_time = parser.add_mutually_exclusive_group(required=True)
group_time.add_argument('-w', '--week', metavar='time', type=int, help='time interval in weeks')
group_time.add_argument('-m', '--month', metavar='time', type=int, help='time interval in months')
group_time.add_argument('-y', '--year', metavar='time', type=int, help='time interval in years')
group_extra = parser.add_mutually_exclusive_group()
group_extra.add_argument('-c', '--correlation', action='store_true',
help='compute stock correlation(s) (at least two stocks must be given)')
group_extra.add_argument('-s', '--save', metavar='path',
help='save stock trends as PNG files using given path')
args = parser.parse_args()
# parse arguments
stocks = args.positional
if args.week is not None:
period = args.week * 7
period_str = str(args.week) + 'W'
elif args.month is not None:
period = args.month * 30
period_str = str(args.month) + 'M'
elif args.year is not None:
period = args.year * 365
period_str = str(args.year) + 'Y'
if args.correlation and len(stocks) < 2:
print('\nat least two stocks must be given to compute correlation\n')
exit()
# get site handles
while True:
cookie, crumb = get_cookie_crumb()
if '\\u002' not in crumb:
break
# process stocks
stock_ana = {}
for stock in stocks:
history = get_stock_data(stock, period, cookie, crumb)
if 'error' in history:
            print('\nunable to retrieve data for ' + stock.upper() + '\n')
continue
stock_ana[stock] = StockAnalizer()
stock_ana[stock].process_stock_data(history)
# print correlation(s)
if args.correlation:
prices = []
for stock, analzr in stock_ana.items():
            price_diff = np.diff(analzr.price)
# Copyright 2020 Lawrence Livermore National Security, LLC and other authors: <NAME>, <NAME>, <NAME>
# SPDX-License-Identifier: MIT
import numpy as np
import tensorflow as tf
from model import generator
from utils import block_diagonal
from skimage.measure import compare_psnr
from PIL import Image
import pybm3d
import os
from skimage.io import imsave
from utils import grid_imsave, merge
def projector_tf(imgs,phi=None):
csproj = tf.matmul(imgs,tf.squeeze(phi))
return csproj
def sample_Z(m, n):
return np.random.uniform(-1,1,size=[m, n])
def GPP_SC_solve(test_img_name='Parrots',a_m=1.0,b_m=0.0,savedir='outs_sc_tf',USE_BM3D=False):
modelsave ='./gan_models/gen_models_corrupt-cifar32'
fname = '/p/lustre1/anirudh1/GAN/mimicGAN/IMAGENET/test_images/{}.tif'.format(test_img_name)
if not os.path.exists(savedir):
os.makedirs(savedir)
I_x = I_y = 256
d_x = d_y = 32
dim_x = d_x*d_y
batch_size = (I_x*I_y)//(dim_x)
n_measure = 0.1
lr_factor = 1.0#*batch_size//64
n_img_plot = int(np.sqrt(batch_size))
dim_z = 100
dim_phi = int(n_measure*dim_x)
nIter = 151
def mimic_correction_v2(phi_old,y_obs,G_curr,n_batch=batch_size):
a_list = []
b_list = []
for i in range(n_batch):
phi_block = block_diagonal(1*[phi_old])
y_block = tf.reshape(y_obs[i,:],[1,1*dim_phi])
G_block = tf.reshape(G_curr[i,:],[1,1*dim_x])
I = tf.ones_like(phi_old)
I_block = block_diagonal(1*[I])
y_m = tf.matmul(G_block,I_block)
y_hat = tf.matmul(G_block,phi_block)
theta_1 = tf.squeeze(tf.matmul(y_hat,tf.transpose(y_hat)))
theta_2 = tf.squeeze(tf.matmul(y_hat,tf.transpose(y_m)))
C0 = tf.matmul(y_block,tf.transpose(y_hat))
theta_4 = tf.matmul(y_m,tf.transpose(y_m))
C1 = tf.matmul(y_block,tf.transpose(y_m))
a_est = tf.squeeze((theta_4*C0-C1*theta_2)/(theta_1*theta_4 - theta_2*theta_2))
b_est = tf.squeeze((C1 - a_est*theta_2)/theta_4)
# a_est = tf.squeeze((C1-theta_4*b_est)/(theta_2))
a_list.append(a_est)
b_list.append(b_est)
a_approx = tf.reduce_mean(a_list)
b_approx = tf.reduce_mean(b_list)
# b_approx = tf.squeeze((C1 - a_approx*theta_2)/theta_4)
# a_approx = tf.squeeze((C1-theta_4*b_approx)/(theta_2))
return a_approx,b_approx
x_test = Image.open(fname).convert(mode='L').resize((I_x,I_y))
x_test_ = np.array(x_test)/255.
# x_test_ = 2*x_test_-1
x_test = []
for i in range(n_img_plot):
for j in range(n_img_plot):
_x = x_test_[i*d_x:d_x*(i+1),j*d_y:d_y*(j+1)]
x_test.append(_x)
x_test = np.array(x_test)
x_test = np.expand_dims(x_test,3)
test_images = x_test[:batch_size,:,:,:]
grid_imsave(test_images,[n_img_plot,n_img_plot],'{}/gt_sample.png'.format(savedir))
tf.reset_default_graph()
tf.set_random_seed(0)
np.random.seed(4321)
Y_obs_ph = tf.placeholder(tf.float32,[batch_size,dim_phi])
phi_ph = tf.placeholder(tf.float32,[dim_x,dim_phi])
lr = tf.placeholder(tf.float32)
tmp = tf.random_uniform([100,dim_z],minval=-1.0,maxval=1.0)
tmp = tf.expand_dims(tf.reduce_mean(tmp,axis=0),axis=0)
z_ = tf.tile(tmp,[batch_size,1])
z_prior_ = tf.Variable(z_,name="z_prior")
G_sample_ = 0.5*generator(z_prior_,False)+0.5
G_sample = tf.image.resize_images(G_sample_,[d_x,d_y],tf.image.ResizeMethod.BICUBIC)
G_sample_re = tf.reshape(G_sample,[-1,dim_x])
a_est,b_est = mimic_correction_v2(phi_ph,Y_obs_ph,G_sample_re)
phi_est = a_est*phi_ph+b_est
proj_init = projector_tf(G_sample_re,phi_ph)
proj_corrected = projector_tf(G_sample_re,phi_est)
G_loss = tf.reduce_mean(tf.square(proj_corrected-Y_obs_ph))
loss = tf.reduce_mean(tf.abs(proj_corrected-Y_obs_ph))
z_mean = tf.expand_dims(tf.reduce_mean(z_prior_,axis=0),axis=0)
z_mean = tf.tile(z_mean,[64,1])
z_reg_loss = tf.reduce_mean(tf.abs(z_prior_-z_mean))
opt_loss = G_loss
t_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
g_vars = [var for var in t_vars if 'Generator' in var.name]
solution_opt = tf.train.RMSPropOptimizer(lr).minimize(opt_loss, var_list=[z_prior_])
saver = tf.train.Saver(g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(modelsave)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("************ Generator weights restored! **************")
nb = batch_size
z_test = sample_Z(100,dim_z)
phi_np = np.random.randn(dim_x,dim_phi)
phi_test_np = a_m*phi_np+ b_m
y_obs = np.matmul(test_images.reshape(-1,dim_x),phi_test_np)
lr_start = 5e-3 #TBD
a_ests = []
b_ests = []
psnrs = []
for i in range(nIter):
# lr_new = lr_start*0.99**(1.*i/nIter)
if i<30:
lr_new = lr_start
else:
lr_new = 7e-3
if i %10==0:
G_imgs,tr_loss,a_estimate,b_estimate = sess.run([G_sample,G_loss,a_est,b_est],feed_dict={phi_ph:phi_np,Y_obs_ph:y_obs})
                print('iter: {:d}, a*-estimate: {:.4f}, b*-estimate: {:.4f}'.format(i,a_estimate,b_estimate))
merged = merge(G_imgs,[n_img_plot,n_img_plot])
psnr0 = compare_psnr(x_test_,merged,data_range=1.0)
if USE_BM3D:
merged_clean = pybm3d.bm3d.bm3d(merged,0.25)
psnr1 = compare_psnr(x_test_,merged_clean,data_range=1.0)
                    merged_clean = np.array(merged_clean * 255, dtype=np.uint8)
#!/usr/bin/env python
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import BayesianGaussianMixture
import numpy as np
from hdphmm.utils import stats, timeseries
import tqdm
class Cluster:
def __init__(self, params, distance_threshold=2., eigs=False, diags=False, means=False, algorithm='bayesian', ncomponents=10, max_iter=1500,
weight_concentration_prior_type='dirichlet_process', weight_concentration_prior=None,
mean_precision_prior=1, init_params='random', nclusters=None, linkage='ward', convert_rz=False):
""" Cluster like parameter sets
NOTE this is only made for AR(1) at the moment.
:param params: parameters for each trajectory. If a single trajectory was analyzed, you can just pass a single \
dict, otherwise pass a list of dicts
:param distance_threshold: clustering parameter
:param eigs: if True, use largest eigenvalue of AR coefficient and covariance matrices instead of flattened \
matrices.
:param means: if True, take the mean of each set of parameters
:param algorithm: type of clustering algorithm.'bayesian' and 'agglomerative' are implemented
:type params: list or dict
:type distance_threshold: float
:type eigs: bool
:type algorithm: str
"""
cluster_fxn = {'bayesian': self._bayesian, 'agglomerative': self._agglomerative}
try:
self.cluster_fxn = cluster_fxn[algorithm]
except KeyError:
raise Exception("Clustering algorithm, '%s', not implemented. Use either 'bayesian' or 'agglomerative'" % algorithm)
self.means = means
self.ncomponents = ncomponents
self.max_iter = max_iter
self.weight_concentration_prior_type = weight_concentration_prior_type
self.gamma = weight_concentration_prior
self.mean_precision_prior = mean_precision_prior
self.init_params = init_params
self.eigs = eigs
self.diags = diags
self.nclusters = nclusters
self.convert_rz = convert_rz
if self.nclusters is not None:
self.distance_threshold = None
else:
self.distance_threshold = distance_threshold
self.linkage = linkage
if isinstance(params, dict):
A = None
sigma = None
if 'A' in params.keys():
A = self._flatten_A(np.copy(params['A']))
if 'sigma' in params.keys():
sigma = self._flatten_sigma(np.copy(params['sigma']))
mu = None
if 'mu' in params.keys():
mu = np.copy(params['mu'])
if len(mu.shape) == 1:
mu = mu[:, np.newaxis]
T_ = None
if 'T' in params.keys(): # This should already be in the form -log(1 - T)
T_ = np.copy(params['T'])
if len(T_.shape) == 1:
T_ = T_[:, np.newaxis]
# Note that these values should be transformed
# print(T_[:10, 0])
# T_[:, 0] = 1 / (1 - T_[:, 0])
# print(T_[:10, 0])
# for i, t in tqdm.tqdm(enumerate(T)):
# T[i] = timeseries.dwell(t[0], ntrials=100)
elif isinstance(params, list):
A = self._flatten_A(params[0]['A'])
sigma = self._flatten_sigma(params[0]['sigma'])
for param_set in params[1:]:
A = np.concatenate((A, self._flatten_A(param_set['A'])))
sigma = np.concatenate((sigma, self._flatten_sigma(param_set['sigma'])))
else:
raise Exception('Input data type not recognized. Please pass a list or a dict.')
if A is not None:
if len(A.shape) < 2:
A = A[:, np.newaxis]
if sigma is not None:
if len(sigma.shape) < 2:
sigma = sigma[:, np.newaxis]
self.X = np.concatenate((A, sigma), axis=1)
else:
self.X = A
elif sigma is not None:
if len(sigma.shape) < 2:
sigma = sigma[:, np.newaxis]
self.X = sigma
if mu is not None:
if A is None and sigma is None:
self.X = mu
else:
self.X = np.concatenate((self.X, mu), axis=1)
if T_ is not None:
if A is None and sigma is None and mu is None:
self.X = T_
else:
self.X = np.concatenate((self.X, T_), axis=1)
        if algorithm == 'agglomerative':
for d in range(self.X.shape[1]):
outliers_removed = stats.remove_outliers(self.X[:, d])
# outliers_removed = np.copy(self.X[:, d])
# print(outliers_removed.size)
# self.X[:, d] -= outliers_removed.min()
# self.X[:, d] /= outliers_removed.max()
self.X[:, d] -= outliers_removed.mean()
self.X[:, d] /= outliers_removed.std()
self.labels = None
self.clusters = None
# clusters = AgglomerativeClustering(n_clusters=None, distance_threshold=distance_threshold)
# self.labels = clusters.fit_predict(X)
def fit(self):
self.cluster_fxn()
self.labels = self.clusters.fit_predict(self.X)
self._remap_labels()
def _remap_labels(self):
""" relabel the labels counting from zero
:return:
"""
map_states = dict()
unique_labels = np.unique(self.labels)
for i, label in enumerate(unique_labels):
map_states[label] = i
self.labels = [map_states[l] for l in self.labels]
self.nclusters = unique_labels.size
def _agglomerative(self):
self.clusters = AgglomerativeClustering(n_clusters=self.nclusters, distance_threshold=self.distance_threshold,
linkage=self.linkage)
def _bayesian(self):
self.clusters = BayesianGaussianMixture(n_components=self.ncomponents, max_iter=self.max_iter,
weight_concentration_prior_type=self.weight_concentration_prior_type,
weight_concentration_prior=self.gamma,
mean_precision_prior=self.mean_precision_prior,
init_params=self.init_params, verbose=0)
def _flatten_A(self, A):
if self.eigs:
reordered = np.moveaxis(A, -1, 0)#[:, 0, ...] # reorder axes of A
eigs = np.linalg.eig(reordered)[0].real # eigenvalues of each matrix
if self.convert_rz:
eigs = np.concatenate((np.square(eigs[:, :2]).sum(axis=1)[:, np.newaxis], eigs[:, [2]]), axis=1)
return eigs # imaginary component usually very close to zero
elif self.diags:
return np.array([np.diag(A[..., a]) for a in range(A.shape[2])])
else:
# a = np.zeros([A.shape[0], A.shape[-1], A.shape[2]*A.shape[3]])
# for i in range(A.shape[-1]):
# for j in range(A.shape[0]):
# a[j, i, :] = A[j, 0, ..., i].flatten()
return A.reshape((A.shape[0]*A.shape[1], A.shape[2])).T
# if self.means:
#
# return a.mean(axis=0)
#
# else:
#
# return np.reshape(a, (a.shape[0]*a.shape[1], a.shape[2]))
def _flatten_sigma(self, sigma):
if self.eigs:
            reordered = np.moveaxis(sigma, -1, 0)
import numpy as np
from matplotlib.figure import Figure
from matplotlib.artist import setp
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import colors, cm
from matplotlib.backends.backend_pdf import PdfPages
def plot_fancy_occupancy(hist, title, z_max=None, filename=None):
if z_max == 'median':
cmap = cm.get_cmap('coolwarm')
else:
cmap = cm.get_cmap('viridis')
cmap.set_bad('w')
if z_max == 'median':
z_max = 2 * np.ma.median(hist)
elif z_max == 'maximum' or z_max is None:
z_max = np.ma.max(hist)
if z_max < 1 or hist.all() is np.ma.masked or np.allclose(0, hist):
z_max = 1.0
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_title(title, size=6)
extent = [0.5, 1152.5, 576.5, 0.5]
bounds = np.linspace(start=0, stop=z_max, num=255, endpoint=True)
norm = colors.BoundaryNorm(bounds, cmap.N)
hist = np.ma.masked_equal(hist, 0)
im = ax.imshow(hist, interpolation='none', aspect='auto', cmap=cmap, norm=norm, extent=extent)
ax.set_ylim((576.5, 0.5))
ax.set_xlim((0.5, 1152.5))
ax.set_xlabel('Column')
ax.set_ylabel('Row')
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(ax)
axHistx = divider.append_axes("top", 1.2, pad=0.2, sharex=ax)
axHisty = divider.append_axes("right", 1.2, pad=0.2, sharey=ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cb = fig.colorbar(im, cax=cax, ticks=np.linspace(start=0, stop=z_max, num=9, endpoint=True))
cb.set_label("#")
# make some labels invisible
setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
    hight = np.ma.sum(hist, axis=0)
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 14:35:23 2018
a module for orthogonal linear separation analysis (OLSA)
@author: setsuo, shotaro, and tadahaya
"""
import sys
import csv
import math
import os
import numpy as np
import pandas as pd
np.seterr(divide='ignore', invalid='ignore')
import time
from sklearn.decomposition import PCA
from scipy import stats
from scipy.cluster.hierarchy import ward,leaves_list
from .data import Result
class Varimax:
"""Varimax rotation"""
def __init__(self, X):
self.n = np.shape(X)[0]
self.p = np.shape(X)[1]
self.A = np.matrix(X)
print("ok1")
if self.p > 1:
self.h = []
for i in range(0, self.n):
sum = self.A[i].dot(self.A[i].T)[0,0]
self.h.append(math.sqrt(sum))
if self.h[i]!=0:
self.A[i] /= self.h[i]
def rotateV(self,acceptable_error=1.0e-9):
"""varimax rotation"""
mm = lambda x: x*x
if (self.p < 2):
return self.A
while True:
ckA = np.matrix(self.A.A)
for i in range(0, self.p):
for j in range(i + 1, self.p):
x = np.matrix(self.A.T[i].A)
y = np.matrix(self.A.T[j].A)
u = np.matrix(x.A**2 - y.A**2)
v = np.matrix(2.0 * x.A * y.A)
cA = np.sum(u)
cB = np.sum(v)
cC = u.dot(u.T)[0,0] - v.dot(v.T)[0,0]
cD = 2.0 * u.dot(v.T)[0,0]
num = cD - 2 * cA * cB / self.n
den = cC - (mm(cA) - mm(cB)) /self.n
theta4 = math.atan(num / den)
if (num > 0.0):
if (theta4 < 0.0):
theta4 += math.pi
else:
if (theta4 > 0.0):
theta4 += math.pi
theta = theta4 / 4.0
tx = self.A.T[i] * math.cos(theta) + self.A.T[j] * math.sin(theta)
ty = -self.A.T[i] * math.sin(theta) + self.A.T[j] * math.cos(theta)
self.A.T[i] = tx
self.A.T[j] = ty
dif = np.sum((ckA.A-self.A.A)**2)
print("\r" + str(dif),end="")
if (dif < acceptable_error):
for i in range(0, self.n):
self.A[i] *= self.h[i]
break
if math.isnan(dif):
print("error")
sys.exit()
print("")
return self.A
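# Hypothetical usage sketch (loading values assumed for illustration): rotateV iterates
# pairwise rotations until the change in loadings drops below acceptable_error, then
# restores the row norms removed in __init__.
# >>> loadings = np.array([[0.8, 0.2], [0.7, 0.3], [0.1, 0.9]])
# >>> rotated = Varimax(loadings).rotateV()
# >>> rotated.shape
# (3, 2)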
class SmirnovGrubbs:
def __init__(self):
self.RemainedIndexes = list()
self.RemovedIndexesHigher = list()
self.RemovedIndexesLower = list()
self.NonbiasedVar = float()
self.alpha = float()
def calcSG(TS,alpha):
"""conduct SG test to exclude the data with unusual TS"""
res = SmirnovGrubbs()
res.alpha = alpha
Data = list()
RemovedDataHigher = list()
RemovedDataLower = list()
for i in range(0,TS.shape[0]):
Data.append([i,TS[i]])
while True:
n=len(Data)
if n<3:
break
t = stats.t.isf((alpha/n) / 2, n-2)
Gtest = (n-1)/math.sqrt(n) * math.sqrt(t**2 / (n-2 + t**2))
mean=0.0
for d in Data:
mean = mean + d[1]
mean = mean / n
var = 0.0
for d in Data:
var = var + (d[1]-mean)*(d[1]-mean)
var = var / n
sd = math.sqrt(var)
maxindex = Data[0][0]
maxvalue = math.fabs(Data[0][1] - mean)
for i in range(0,len(Data)):
if maxvalue < math.fabs(Data[i][1] - mean):
maxindex = i
maxvalue = math.fabs(Data[i][1] - mean)
#SmirnovGrubbs
if maxvalue / sd > Gtest:
if (Data[maxindex][1]-mean)>0:
RemovedDataHigher.append([Data[maxindex][0],Data[maxindex][1]])
else:
RemovedDataLower.append([Data[maxindex][0],Data[maxindex][1]])
del Data[maxindex]
else:
break
mean=0.0
for d in Data:
mean = mean + d[1]
mean = mean / n
ubvar = 0.0
for d in Data:
ubvar = ubvar + (d[1]-mean)*(d[1]-mean)
ubvar = ubvar / (n-1.0)
res.NonbiasedVar = ubvar
for d in Data:
res.RemainedIndexes.append(int(d[0]))
for d in RemovedDataHigher:
res.RemovedIndexesHigher.append(int(d[0]))
for d in RemovedDataLower:
res.RemovedIndexesLower.append(int(d[0]))
return res
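# Illustrative sketch (assumed values): the Smirnov-Grubbs test flags the obvious
# high outlier and keeps the indexes of the remaining samples.
# >>> TS = np.array([10.0, 10.2, 9.9, 10.1, 25.0, 10.0, 9.8, 10.3, 10.1, 9.9])
# >>> res = calcSG(TS, 0.05)
# >>> res.RemovedIndexesHigher
# [4]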
def usspca(Data,accumulation=0.95,IsSphered=True,
UseMirror=True,UseSmirnovGrubbs=True,WardClusterSort=True):
"""
conduct Unit Spherized Symmetric PCA
Parameters
----------
Data: DataClass object
subjected to analysis
accumulation: float, default 0.95
% of cumulative contribution of the calculated vectors
IsSphered: boolean, default True
whether data is unit-sphereized before calculation
UseMirror: boolean, default True
whether the input data set is combined with the origin-symmetric set before calculation
UseSmirnovGrubbs: boolean, default True
whether outliers are excluded according to SG test
WardClusterSort: boolean, default True
whether response score matrix is sorted according to clustering with ward method
"""
if WardClusterSort==True: Data2 = Data.wardclustersort()
else: Data2 = Data
res = Result()
res.accumulation = accumulation
res.sphered = IsSphered
res.Name = Data2.Name[:]
res.index = Data2.index[:]
res.X = np.array(Data2.X)
res.filename = Data2.filename
print ('********************************')
print ('The conditions of this proccess')
print(('Contribution Accumulation Max is ' + str(accumulation)))
print(('IsSphered Flag is ' + str(IsSphered)))
print(('UseMirror Flag is ' + str(UseMirror)))
print(('UseSmirnovGrubbs Flag is ' + str(UseSmirnovGrubbs)))
print ('********************************')
#store Total Strength
res.TS = np.array([np.linalg.norm(s) for s in res.X.T])
if UseSmirnovGrubbs==True:
SGres = calcSG(res.TS,0.05)
print("excluded samples by SG test")
print("too large norm:")
print(list([res.Name[s] for s in SGres.RemovedIndexesHigher]))
print("too small norm:")
print(list([res.Name[s] for s in SGres.RemovedIndexesLower]))
#data reconstruction
remainedX = np.array([res.X.T[s] for s in SGres.RemainedIndexes]).T
else:
#data reconstruction
remainedX = res.X
#map the data on a unit sphere
    XS = np.array(res.X)
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def error(msg, code=9):
print('Error: ' + msg)
exit(code)
try:
import argparse
except:
error('This version of python is not new enough. python 2.7 or newer is required.')
try:
from netCDF4 import Dataset
except:
error('Unable to import netCDF4 module. Check your PYTHONPATH.\n'
+ 'Perhaps try:\n module load python_netcdf4')
try:
import numpy as np
except:
error('Unable to import numpy module. Check your PYTHONPATH.\n'
+ 'Perhaps try:\n module load python_numpy')
try:
import matplotlib.pyplot as plt
except:
error('Unable to import matplotlib.pyplot module. Check your PYTHONPATH.\n'
+ 'Perhaps try:\n module load python_matplotlib')
from matplotlib.widgets import Button, RadioButtons, TextBox, CheckButtons
from matplotlib.colors import LinearSegmentedColormap
import shutil as sh
from os.path import dirname, basename, join, splitext
import time
import sys
import os
import pwd
def main():
# Command line arguments
parser = argparse.ArgumentParser(description='''Point-wise editing of topography.
Ignore all the controls in the toolbar at the top of the window.
Zoom in and out with the scroll wheel.
Pan the view with the North, South, East and West buttons.
Use +, -, Flip buttons to modify the colormap.
Set the prescribed depth with the textbox at the bottom.
Left click on a cell to apply the prescribed depth value.
Right click on a cell to reset to the original value.
Double left click to assign the highest of the 4 nearest points with depth<0.
Close the window to write the edits to the output NetCDF file,
and also to a .txt file.
''',
epilog='Written by <NAME> (2013) and <NAME> (2020)')
parser.add_argument('filename', type=str,
help='NetCDF input file to read.')
parser.add_argument('variable', type=str,
nargs='?', default='depth',
help='Name of variable to edit. Defaults to "depth".')
parser.add_argument('--output', type=str, metavar='outfile',
nargs=1, default=[None],
help='Specify output NetCDF filename. Default is "edit_" prepended to the name of the input file. Text output filename is the same but with .nc replaced by .txt.')
parser.add_argument('--ref', type=str, metavar='reffile',
nargs=1, default=[None],
help='NetCDF reference input file to use for copying points from. Must have the same dimensions and variable name as filename.')
parser.add_argument('--apply', type=str, metavar='editfile',
nargs=1, default=[None],
help='Apply edits from iEdit, jEdit, zEdit variables in a NetCDF file, or from an ascii text file. Two text file formats are supported: whitespace-delimited (in which the first row begins with editTopo.py and ends with a version number (must be 1), data rows contain i, j, old, new (integers i, j count from 0; old is ignored), and anything following # is ignored), and the old edits file format (comma delimited i, j, new (i, j count from 1 and may be single integers or start:end inclusive integer ranges), and anything following # is ignored).')
parser.add_argument('--nogui',
action='store_true', default=False,
help="Don't open GUI. Best used with --apply, in which case editfile is applied to filename and saved as outfile, then program exits.")
parser.add_argument('--overwrite',
action='store_true', default=False,
help="Permit overwriting existing output files.")
optCmdLineArgs = parser.parse_args()
createGUI(optCmdLineArgs.filename, optCmdLineArgs.variable,
optCmdLineArgs.output[0], optCmdLineArgs.ref[0],
optCmdLineArgs.apply[0], optCmdLineArgs.nogui,
optCmdLineArgs.overwrite)
def createGUI(fileName, variable, outFile, refFile, applyFile, nogui, overwrite):
if not outFile:
outFile = join(dirname(fileName), 'edit_'+basename(fileName))
editsFile = splitext(outFile)[0]+'.txt'
if fileName == outFile:
error('Output filename must differ from input filename "{}". Exiting.'.format(fileName))
if not overwrite:
if os.path.exists(outFile) or os.path.exists(editsFile):
error('"{}" or "{}" already exists. To overwrite, use the --overwrite option.'.format(outFile, editsFile))
# Open NetCDF files
try:
rg = Dataset(fileName, 'r')
except:
error('There was a problem opening input NetCDF file "'+fileName+'".')
rgVar = rg.variables[variable] # handle to the variable
dims = rgVar.dimensions # tuple of dimensions
depth = rgVar[:] # Read the data
#depth = depth[0:600,0:600]
(nj, ni) = depth.shape
print('Range of input depths: min=', np.amin(depth), 'max=', np.amax(depth))
ref = None
if refFile:
try:
ref = Dataset(refFile, 'r').variables[variable][:]
except:
error('There was a problem opening reference NetCDF file "'+refFile+'".')
try:
sg = Dataset('supergrid.nc', 'r')
lon = sg.variables['x'][:]
lon = lon[0:2*nj+1:2, 0:2*ni+1:2]
lat = sg.variables['y'][:]
lat = lat[0:2*nj+1:2, 0:2*ni+1:2]
except:
lon, lat = np.meshgrid(np.arange(ni+1), np.arange(nj+1))
fullData = Topography(lon, lat, depth, ref)
class Container:
def __init__(self):
self.view = None
self.edits = None
self.data = None
self.quadMesh = None
self.cbar = None
self.ax = None
self.syms = None
self.useref = False
self.textbox = None
cdict = {'red': ((0.0, 0.0, 0.0), (0.5, 0.7, 0.0), (1.0, 0.9, 0.0)),
'green': ((0.0, 0.0, 0.0), (0.5, 0.7, 0.2), (1.0, 1.0, 0.0)),
'blue': ((0.0, 0.0, 0.2), (0.5, 1.0, 0.0), (1.0, 0.9, 0.0))}
cdict_r = {'red': ((0.0, 0.0, 0.0), (0.497, 0.7, 0.0), (1.0, 0.9, 0.0)),
'green': ((0.0, 0.0, 0.0), (0.497, 0.7, 0.2), (1.0, 1.0, 0.0)),
'blue': ((0.0, 0.0, 0.2), (0.497, 1.0, 0.0), (1.0, 0.9, 0.0))}
self.cmap1 = LinearSegmentedColormap('my_colormap', cdict, 256)
self.cmap2 = LinearSegmentedColormap('my_colormap', cdict_r, 256).reversed()
self.cmap3 = plt.get_cmap('seismic')
self.cmap = self.cmap1
self.prevcmap = self.cmap
self.clim = 6000
self.plotdiff = False
self.fieldname = None
All = Container()
All.view = View(ni, nj)
All.edits = Edits()
# Read edit data, if it exists
if 'iEdit' in rg.variables:
jEdit = rg.variables['iEdit'][:]
iEdit = rg.variables['jEdit'][:]
zEdit = rg.variables['zEdit'][:] # Original value of edited data
for l, i in enumerate(iEdit):
All.edits.setVal(fullData.height[iEdit[l], jEdit[l]])
fullData.height[iEdit[l], jEdit[l]] = zEdit[l] # Restore data
All.edits.add(iEdit[l], jEdit[l])
if applyFile:
try: # first try opening as a NetCDF
apply = Dataset(applyFile, 'r')
if 'iEdit' in apply.variables:
jEdit = apply.variables['iEdit'][:]
iEdit = apply.variables['jEdit'][:]
zNew = apply.variables[variable]
for l, i in enumerate(iEdit):
All.edits.add(iEdit[l], jEdit[l], zNew[iEdit[l], jEdit[l]])
apply.close()
except:
try: # if that fails, try opening as a text file
with open(applyFile, 'rt') as edFile:
edCount = 0
line = edFile.readline()
version = None
if line.startswith('editTopo.py'):
version = line.strip().split()[-1]
if version is None:
# assume this is in comma-delimited format ii, jj, zNew # comment
# where ii, jj may be integers or start:end inclusive integer ranges,
# indexed counting from 1
while line:
linedata = line.strip().split('#')[0].strip()
if linedata:
jEdits, iEdits, zNew = linedata.split(',') # swaps meaning of i & j
iEdits = [int(x) for x in iEdits.strip().split(':')]
jEdits = [int(x) for x in jEdits.strip().split(':')]
zNew = float(zNew.strip())
for ed in [iEdits, jEdits]:
if len(ed) == 1:
ed.append(ed[0]+1)
elif len(ed) == 2:
ed[1] += 1
else:
raise ValueError
for i in range(*iEdits):
for j in range(*jEdits):
All.edits.add(i-1, j-1, zNew) # -1 because ii, jj count from 1
edCount += 1
line = edFile.readline()
elif version == '1':
# whitespace-delimited format jEdit iEdit zOld zNew # comment
# where ii, jj are integer indices counting from 0
while line:
line = edFile.readline()
linedata = line.strip().split('#')[0].strip()
if linedata:
jEdit, iEdit, _, zNew = linedata.split() # swap meaning of i & j; ignore zOld
iEdit = int(iEdit)
jEdit = int(jEdit)
zNew = float(zNew)
All.edits.add(iEdit, jEdit, zNew)
edCount += 1
else:
error('Unsupported version "{}" in "{}".'.format(version, applyFile))
print('Applied {} cell edits from "{}".'.format(edCount, applyFile))
except:
error('There was a problem applying edits from "'+applyFile+'".')
All.data = fullData.cloneWindow(
(All.view.i0, All.view.j0), (All.view.iw, All.view.jw))
All.fieldname = All.data.fieldnames[0]
if All.edits.ijz:
All.data.applyEdits(fullData, All.edits.ijz)
# A mask based solely on value of depth
# notLand = np.where( depth<0, 1, 0)
# wet = ice9it(600,270,depth)
# plt.rcParams['toolbar'] = 'None' # don't use - also disables statusbar
def replot(All):
if All.cbar is not None:
All.cbar.remove()
h = plt.pcolormesh(All.data.longitude, All.data.latitude,
All.data.plotfield, cmap=All.cmap,
vmin=-All.clim, vmax=All.clim)
hc = plt.colorbar()
return(h, hc)
All.quadMesh, All.cbar = replot(All)
All.syms = All.edits.plot(fullData)
dir(All.syms)
All.ax = plt.gca()
All.ax.set_xlim(All.data.xlim)
All.ax.set_ylim(All.data.ylim)
if fullData.haveref:
def setsource(label):
All.fieldname = label
All.data.plotfield = All.data.fields[All.fieldname]
All.plotdiff = All.fieldname == All.data.fieldnames[2]
if All.plotdiff and All.cmap != All.cmap3:
All.prevcmap = All.cmap
All.cmap = All.cmap3
else:
All.cmap = All.prevcmap
All.quadMesh.set_cmap(All.cmap)
All.cbar.mappable.set_cmap(All.cmap)
All.quadMesh.set_array(All.data.plotfield.ravel())
plt.draw()
sourcebuttons = RadioButtons(plt.axes([.88, .4, 0.12, 0.15]),
All.data.fieldnames)
sourcebuttons.on_clicked(setsource)
def setDepth(str):
try:
All.edits.setVal(float(str))
except:
pass
tbax = plt.axes([0.12, 0.01, 0.3, 0.05])
textbox = TextBox(tbax, 'set depth', '0')
textbox.on_submit(setDepth)
textbox.on_text_change(setDepth)
def nothing(x,y):
return ''
tbax.format_coord = nothing # stop status bar displaying coords in textbox
All.textbox = textbox
if fullData.haveref:
All.useref = True
userefcheck = CheckButtons(plt.axes([0.42, 0.01, 0.11, 0.05]),
['use ref'], [All.useref])
def setuseref(_):
All.useref = userefcheck.get_status()[0]
if not All.useref:
All.edits.setVal(0.0)
All.textbox.set_val(repr(All.edits.newDepth))
userefcheck.on_clicked(setuseref)
else:
All.useref = False
lowerButtons = Buttons(left=.9)
def undoLast(event):
All.edits.pop()
All.data = fullData.cloneWindow(
(All.view.i0, All.view.j0), (All.view.iw, All.view.jw),
fieldname=All.fieldname)
All.data.applyEdits(fullData, All.edits.ijz)
All.quadMesh.set_array(All.data.plotfield.ravel())
All.edits.updatePlot(fullData, All.syms)
plt.draw()
lowerButtons.add('Undo', undoLast)
upperButtons = Buttons(bottom=1-.0615)
def colorScale(event):
Levs = [50, 100, 200, 500, 1000, 2000, 3000, 4000, 5000, 6000]
i = Levs.index(All.clim)
if event == ' + ':
i = min(i+1, len(Levs)-1)
elif event == ' - ':
i = max(i-1, 0)
elif event == 'Flip' and not All.plotdiff:
if All.cmap == All.cmap1:
All.cmap = All.cmap2
else:
All.cmap = All.cmap1
All.clim = Levs[i]
All.quadMesh.set_clim(vmin=-All.clim, vmax=All.clim)
All.quadMesh.set_cmap(All.cmap)
All.cbar.mappable.set_clim(vmin=-All.clim, vmax=All.clim)
All.cbar.mappable.set_cmap(All.cmap)
plt.draw()
def moveVisData(di, dj):
All.view.move(di, dj)
All.data = fullData.cloneWindow(
(All.view.i0, All.view.j0), (All.view.iw, All.view.jw),
fieldname=All.fieldname)
All.data.applyEdits(fullData, All.edits.ijz)
plt.sca(All.ax)
plt.cla()
All.quadMesh, All.cbar = replot(All)
All.ax.set_xlim(All.data.xlim)
All.ax.set_ylim(All.data.ylim)
All.syms = All.edits.plot(fullData)
plt.draw()
def moveWindowLeft(event): moveVisData(-1, 0)
upperButtons.add('West', moveWindowLeft)
def moveWindowRight(event): moveVisData(1, 0)
upperButtons.add('East', moveWindowRight)
def moveWindowDown(event): moveVisData(0, -1)
upperButtons.add('South', moveWindowDown)
def moveWindowUp(event): moveVisData(0, 1)
upperButtons.add('North', moveWindowUp)
climButtons = Buttons(bottom=1-.0615, left=0.75)
def incrCScale(event): colorScale(' + ')
climButtons.add(' + ', incrCScale)
def decrCScale(event): colorScale(' - ')
climButtons.add(' - ', decrCScale)
def revcmap(event): colorScale('Flip')
climButtons.add('Flip', revcmap)
plt.sca(All.ax)
def onClick(event): # Mouse button click
if event.inaxes == All.ax and event.button == 1 and event.xdata:
# left click: edit point
(i, j) = findPointInMesh(fullData.longitude, fullData.latitude,
event.xdata, event.ydata)
if i is not None:
(I, J) = findPointInMesh(All.data.longitude, All.data.latitude,
event.xdata, event.ydata)
if event.dblclick:
nVal = -99999
if All.data.height[I+1, J] < 0:
nVal = max(nVal, All.data.height[I+1, J])
if All.data.height[I-1, J] < 0:
nVal = max(nVal, All.data.height[I-1, J])
if All.data.height[I, J+1] < 0:
nVal = max(nVal, All.data.height[I, J+1])
if All.data.height[I, J-1] < 0:
nVal = max(nVal, All.data.height[I, J-1])
if nVal == -99999:
return
All.edits.add(i, j, nVal)
All.data.height[I, J] = nVal
else:
All.edits.add(i, j)
All.data.height[I, J] = All.edits.get()
if All.data.haveref:
All.data.diff[I, J] = All.data.height[I, J] - All.data.ref[I, J]
All.quadMesh.set_array(All.data.plotfield.ravel())
All.edits.updatePlot(fullData, All.syms)
plt.draw()
elif event.inaxes == All.ax and event.button == 3 and event.xdata:
# right click: undo edit
(i, j) = findPointInMesh(fullData.longitude, fullData.latitude,
event.xdata, event.ydata)
if i is not None:
All.edits.delete(i, j)
All.data = fullData.cloneWindow(
(All.view.i0, All.view.j0), (All.view.iw, All.view.jw),
fieldname=All.fieldname)
All.data.applyEdits(fullData, All.edits.ijz)
All.quadMesh.set_array(All.data.plotfield.ravel())
All.edits.updatePlot(fullData, All.syms)
plt.draw()
elif event.inaxes == All.ax and event.button == 2 and event.xdata:
zoom(event) # Re-center
plt.gcf().canvas.mpl_connect('button_press_event', onClick)
def zoom(event): # Scroll wheel up/down
if event.button == 'up':
scale_factor = 1/1.5 # deal with zoom in
elif event.button == 'down':
scale_factor = 1.5 # deal with zoom out
else:
scale_factor = 1.0
new_xlim, new_ylim = newLims(
All.ax.get_xlim(), All.ax.get_ylim(),
(event.xdata, event.ydata),
All.data.xlim, All.data.ylim,
All.view.ni, All.view.nj,
scale_factor)
if new_xlim is None:
return # No change in limits
All.view.seti(new_xlim)
All.view.setj(new_ylim)
All.data = fullData.cloneWindow(
(All.view.i0, All.view.j0), (All.view.iw, All.view.jw),
fieldname=All.fieldname)
All.data.applyEdits(fullData, All.edits.ijz)
plt.sca(All.ax)
plt.cla()
All.quadMesh, All.cbar = replot(All)
# All.ax.set_xlim(All.data.xlim)
# All.ax.set_ylim(All.data.ylim)
All.syms = All.edits.plot(fullData)
All.ax.set_xlim(new_xlim)
All.ax.set_ylim(new_ylim)
# All.cbar.mappable.set_clim(vmin=-All.clim, vmax=All.clim)
# All.cbar.mappable.set_cmap(All.cmap)
plt.draw() # force re-draw
plt.gcf().canvas.mpl_connect('scroll_event', zoom)
def statusMesg(x, y):
j, i = findPointInMesh(fullData.longitude, fullData.latitude, x, y)
if All.useref:
All.textbox.set_val(repr(fullData.ref[j, i])) # callback calls All.edits.setVal
if i is not None:
height = fullData.height[j, i]
newval = All.edits.getEdit(j, i)
if newval is not None:
return 'depth(%i,%i) = %g (was %g) depth - set depth = %g' % \
(i, j, newval, height, newval - All.edits.newDepth)
else:
return 'depth(%i,%i) = %g depth - set depth = %g' % \
(i, j, height, height - All.edits.newDepth)
else:
return 'new depth = %g' % \
(All.edits.newDepth)
All.ax.format_coord = statusMesg
if not nogui:
print("""
Ignore all the controls in the toolbar at the top of the window.
Zoom in and out with the scroll wheel.
Pan the view with the North, South, East and West buttons.
Use +, -, Flip buttons to modify the colormap.
Set the prescribed depth with the textbox at the bottom.
Left click on a cell to apply the prescribed depth value.
Right click on a cell to reset to the original value.
Double left click to assign the highest of the 4 nearest points with depth<0.
Close the window to write the edits to the output NetCDF file,
and also to a .txt file.
""")
plt.show()
# The following is executed after GUI window is closed
# All.edits.list()
if not outFile == ' ':
print('Made %i edits.' % (len(All.edits.ijz)))
print('Writing edited topography to "'+outFile+'".')
# Create new netcdf file
if not fileName == outFile:
sh.copyfile(fileName, outFile)
try:
rg = Dataset(outFile, 'r+')
except:
error('There was a problem opening "'+outFile+'".')
rgVar = rg.variables[variable] # handle to the variable
dims = rgVar.dimensions # tuple of dimensions
rgVar[:] = fullData.height[:, :] # Write the data
if All.edits.ijz:
# print('Applying %i edits' % (len(All.edits.ijz)))
if 'nEdits' in rg.dimensions:
numEdits = rg.dimensions['nEdits']
else:
numEdits = rg.createDimension(
'nEdits', 0) # len(All.edits.ijz))
if 'iEdit' in rg.variables:
iEd = rg.variables['iEdit']
else:
iEd = rg.createVariable('iEdit', 'i4', ('nEdits',))
iEd.long_name = 'i-index of edited data'
if 'jEdit' in rg.variables:
jEd = rg.variables['jEdit']
else:
jEd = rg.createVariable('jEdit', 'i4', ('nEdits',))
jEd.long_name = 'j-index of edited data'
if 'zEdit' in rg.variables:
zEd = rg.variables['zEdit']
else:
zEd = rg.createVariable('zEdit', 'f4', ('nEdits',))
zEd.long_name = 'Original value of edited data'
try:
zEd.units = rgVar.units
except AttributeError:
zEd.units = 'm'
hist_str = 'made %i changes (i, j, old, new): ' % len(All.edits.ijz)
for l, (i, j, z) in enumerate(All.edits.ijz):
if l > 0:
hist_str += ', '
iEd[l] = j
jEd[l] = i
zEd[l] = rgVar[i, j]
rgVar[i, j] = z
hist_str += repr((j, i, zEd[l].item(), rgVar[i, j].item()))
print(hist_str.replace(': ', ':\n').replace('), ', ')\n'))
hist_str = time.ctime(time.time()) + ' ' \
+ ' '.join(sys.argv) \
+ ' ' + hist_str
if 'history' not in rg.ncattrs():
rg.history = hist_str
else:
rg.history = rg.history + ' | ' + hist_str
# write editsFile even if no edits, so editsFile will match outFile
print('Writing list of edits to text file "'+editsFile+'" (this can be used with --apply).')
try:
with open(editsFile, 'wt') as edfile:
edfile.write('editTopo.py edits file version 1\n')
edfile.write('#\n# This file can be used as an argument for editTopo.py --apply\n#\n')
edfile.write('# created: ' + time.ctime(time.time()) + '\n')
edfile.write('# by: ' + pwd.getpwuid(os.getuid()).pw_name + '\n')
edfile.write('# via: ' + ' '.join(sys.argv) + '\n#\n')
if All.edits.ijz:
ii, jj, _ = zip(*All.edits.ijz)
news = [rgVar[i, j].item() for (i, j, _) in All.edits.ijz]
olds = [fullData.height[i, j].item() for (i, j, _) in All.edits.ijz]
iiwidth = max([len(repr(x)) for x in ii], default=0) + 2
jjwidth = max([len(repr(x)) for x in jj], default=0) + 2
oldwidth = max([len(repr(x)) for x in olds], default=0) + 2
edfile.write('# ' + \
'i'.rjust(jjwidth-2) + # swaps meaning of i & j
'j'.rjust(iiwidth) + # ditto
' ' +
'old'.ljust(oldwidth) +
'new' + '\n')
for (i, j, old, new) in zip(ii, jj, olds, news):
edfile.write(repr(j).rjust(jjwidth) + # swaps meaning of i & j
repr(i).rjust(iiwidth) + # ditto
' ' +
repr(old).ljust(oldwidth) +
repr(new) + '\n')
else:
edfile.write('# i j old new\n')
except:
error('There was a problem creating "'+editsFile+'".')
rg.close()
def ice9it(i, j, depth):
# Iterative implementation of "ice 9"
wetMask = 0*depth
(ni, nj) = wetMask.shape
stack = set()
stack.add((i, j))
while stack:
(i, j) = stack.pop()
if wetMask[i, j] or depth[i, j] >= 0:
continue
wetMask[i, j] = 1
if i > 0:
stack.add((i-1, j))
else:
stack.add((ni-1, j))
if i < ni-1:
stack.add((i+1, j))
else:
stack.add((0, j))
if j > 0:
stack.add((i, j-1))
if j < nj-1:
stack.add((i, j+1))
return wetMask
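
# --- Illustrative sketch (not part of the original tool): a minimal, hypothetical
# demonstration of the iterative "ice 9" flood fill above. It only assumes numpy.
# Cells with depth < 0 are wet; starting from a seed cell, ice9it marks every wet
# cell connected to it (the i-direction wraps around, the j-direction does not).
def _demo_ice9it():
    demo_depth = np.array([[-10., -10.,   5., -10.],
                           [-10.,   5.,   5., -10.],
                           [-10., -10., -10., -10.]])
    # Seed the fill at cell (0, 0); dry cells (depth >= 0) are never marked.
    wet = ice9it(0, 0, demo_depth)
    return wet  # 1 where connected wet cells were found, 0 elsewhere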
def findPointInMesh(meshX, meshY, pointX, pointY):
def sign(x):
if x > 0:
return 1.0
elif x < 0:
return -1.0
else:
return 0.
def crossProd(u0, v0, u1, v1):
return sign(u0*v1 - u1*v0)
def isPointInConvexPolygon(pX, pY, p):
u0 = pX[0]-pX[-1]
v0 = pY[0]-pY[-1]
u1 = pX[-1] - p[0]
v1 = pY[-1] - p[1]
firstSign = crossProd(u0, v0, u1, v1)
for n in range(len(pX)-1):
u0 = pX[n+1]-pX[n]
v0 = pY[n+1]-pY[n]
u1 = pX[n] - p[0]
v1 = pY[n] - p[1]
if crossProd(u0, v0, u1, v1)*firstSign < 0:
return False
return True
def recurIJ(mX, mY, p, ij00, ij22):
# Unpack indices
i0 = ij00[0]
i2 = ij22[0]
j0 = ij00[1]
j2 = ij22[1]
# Test bounding box first (bounding box is larger than polygon)
xmin = min(np.amin(mX[i0, j0:j2]), np.amin(
mX[i2, j0:j2]), np.amin(mX[i0:i2, j0]), np.amin(mX[i0:i2, j2]))
xmax = max(np.amax(mX[i0, j0:j2]), np.amax(
mX[i2, j0:j2]), np.amax(mX[i0:i2, j0]), np.amax(mX[i0:i2, j2]))
ymin = min(np.amin(mY[i0, j0:j2]), np.amin(
mY[i2, j0:j2]), np.amin(mY[i0:i2, j0]), np.amin(mY[i0:i2, j2]))
ymax = max(np.amax(mY[i0, j0:j2]), np.amax(
            mY[i2, j0:j2]), np.amax(mY[i0:i2, j0]), np.amax(mY[i0:i2, j2]))
# General Packages
from math import atan2, degrees
from datetime import datetime
from pathlib import Path
import time
import pprint
import numpy as np
import pandas as pd
import pickle
# Plotting
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.dates import date2num
import seaborn as sns
# Scaling
from sklearn.preprocessing import StandardScaler
settings = {
#
# audit settings
'data_name': 'credit',
'method_name': 'logreg',
'normalize_data': True,
'force_rational_actions': False,
#
# script flags
'audit_recourse': True,
'plot_audits': True,
'print_flag': True,
'save_flag': True,
'randomseed': 2338,
#
# placeholders
'method_suffixes': [''],
'audit_suffixes': [''],
}
# Paths
repo_dir = Path(__file__).absolute().parent.parent
paper_dir = repo_dir / 'paper/' # directory containing paper related info
data_dir = paper_dir / 'data/' # directory containing data files
results_dir = paper_dir / 'results/' # directory containing results
# create directories that don't exist
for d in [data_dir, results_dir]:
d.mkdir(exist_ok = True)
# Formatting Options
np.set_printoptions(precision = 4, suppress = False)
pd.set_option('display.max_columns', 30)
pd.options.mode.chained_assignment = None
pp = pprint.PrettyPrinter(indent = 4)
# Plotting Settings
sns.set(style="white", palette="muted", color_codes = True)
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 24
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rc('legend', fontsize = 20)
# file names
output_dir = results_dir / settings['data_name']
output_dir.mkdir(exist_ok = True)
if settings['normalize_data']:
settings['method_suffixes'].append('normalized')
if settings['force_rational_actions']:
settings['audit_suffixes'].append('rational')
# set file header
settings['dataset_file'] = '%s/%s_processed.csv' % (data_dir, settings['data_name'])
settings['file_header'] = '%s/%s_%s%s' % (output_dir, settings['data_name'], settings['method_name'], '_'.join(settings['method_suffixes']))
settings['audit_file_header'] = '%s%s' % (settings['file_header'], '_'.join(settings['audit_suffixes']))
settings['model_file'] = '%s_models.pkl' % settings['file_header']
settings['audit_file'] = '%s_audit_results.pkl' % settings['audit_file_header']
# Recourse Objects
from recourse.action_set import ActionSet
from recourse.builder import RecourseBuilder
from recourse.auditor import RecourseAuditor
from recourse.flipset import Flipset
### Helper Functions for Experimental Script
def load_data():
"""Helper function to load in data, and output that and optionally a scaler object:
Output:
data: dict with the following fields
outcome_name: Name of the outcome variable (inferred as the first column.)
variable_names: A list of names indicating input columns.
X: The input features for our model.
y: The column of the dataframe indicating our outcome variable.
scaler: The sklearn StandardScaler used to normalize the dataset, if we wish to scale.
X_scaled: Scaled version of X, if we wish to scale
X_train: The training set: set to the whole dataset if not scaled. Set to X_scaled if we do scale.
        scaler:
            The StandardScaler used to scale the data; returned as None when
            settings['normalize_data'] is False.
"""
# data set
data_df = pd.read_csv(settings['dataset_file'])
data = {
'outcome_name': data_df.columns[0],
'variable_names': data_df.columns[1:].tolist(),
'X': data_df.iloc[:, 1:],
'y': data_df.iloc[:, 0]
}
scaler = None
data['X_train'] = data['X']
data['scaler'] = None
if settings['normalize_data']:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
data['X_scaled'] = pd.DataFrame(scaler.fit_transform(data['X'].to_numpy(dtype=float), data['y'].values),
columns=data['X'].columns)
data['X_train'] = data['X_scaled']
data['scaler'] = scaler
return data, scaler
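
# --- Illustrative sketch (not from the original script): a minimal, hypothetical
# example of the normalization step performed in load_data(), using a tiny
# in-memory frame instead of settings['dataset_file']. It only assumes pandas,
# numpy and StandardScaler, all of which are imported above.
def _demo_scaling_step():
    toy_df = pd.DataFrame({'y': [0, 1, 1, 0],
                           'x1': [10., 20., 30., 40.],
                           'x2': [1., 1., 2., 2.]})
    X, y = toy_df.iloc[:, 1:], toy_df.iloc[:, 0]
    scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
    # Same call pattern as in load_data(): fit on X and keep the column names.
    X_scaled = pd.DataFrame(scaler.fit_transform(X.to_numpy(dtype=float), y.values),
                            columns=X.columns)
    return X_scaled, scaler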
def undo_coefficient_scaling(clf = None, coefficients = None, intercept = 0.0, scaler = None):
"""
given coefficients and data for scaled data, returns coefficients and intercept for unnormalized data
w = w_scaled / sigma
b = b_scaled - (w_scaled / sigma).dot(mu) = b_scaled - w.dot(mu)
:param sklearn linear classifier
:param coefficients: vector of coefficients
:param intercept: scalar for the intercept function
    :param scaler: sklearn StandardScaler used to normalize the data, or None
:return: coefficients and intercept for unnormalized data
"""
if coefficients is None:
assert clf is not None
assert intercept == 0.0
assert hasattr(clf, 'coef_')
coefficients = clf.coef_
intercept = clf.intercept_ if hasattr(clf, 'intercept_') else 0.0
if scaler is None:
w = np.array(coefficients)
b = float(intercept)
else:
        assert isinstance(scaler, StandardScaler)
x_shift = np.array(scaler.mean_)
x_scale = np.sqrt(scaler.var_)
w = coefficients / x_scale
b = intercept - np.dot(w, x_shift)
w = np.array(w).flatten()
b = float(b)
return w, b
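
# --- Illustrative check (not part of the original script): a small, hypothetical
# sanity test of undo_coefficient_scaling(). It fits a logistic regression on
# standardized toy data and verifies that the unscaled (w, b) reproduce the same
# decision scores on the raw features, i.e. w = w_scaled / sigma and
# b = b_scaled - w.dot(mu). Assumes scikit-learn's LogisticRegression, which is
# not imported above, so it is imported locally here.
def _demo_undo_coefficient_scaling():
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X_raw = rng.normal(loc=5.0, scale=3.0, size=(200, 4))
    y = (X_raw[:, 0] - X_raw[:, 1] > 0).astype(int)
    scaler = StandardScaler().fit(X_raw)
    clf = LogisticRegression().fit(scaler.transform(X_raw), y)
    w, b = undo_coefficient_scaling(clf=clf, scaler=scaler)
    scores_scaled = scaler.transform(X_raw).dot(clf.coef_.flatten()) + clf.intercept_[0]
    scores_raw = X_raw.dot(w) + b
    assert np.allclose(scores_scaled, scores_raw)
    return w, b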
def get_coefficient_df(model_dict, variable_names = None, scaler = None):
"""
extract coefficients of all models and store them into a data.frame
:param model_dict: dictionary of models
:param variable_names:
:return:
"""
# get the coefficient values
assert isinstance(model_dict, dict)
coef_df = []
for k in sorted(model_dict.keys()):
coef_vals = model_dict[k].coef_.flatten()
intercept_val = model_dict[k].intercept_[0]
coef_vals, intercept_val = undo_coefficient_scaling(coefficients = coef_vals, intercept = intercept_val, scaler = scaler)
if variable_names is None:
            coef_vals = (pd.Series(coef_vals, index = ['x%d' % j for j in range(len(coef_vals))]).to_frame(k))
else:
coef_vals = (pd.Series(coef_vals, index = variable_names).to_frame(k))
coef_df.append(coef_vals)
return pd.concat(coef_df, axis = 1)
def format_gridsearch_df(grid_search_df, settings, n_coefficients, invert_C = True):
"""
Take a fitted GridSearchCV and return:
model_stats_df: data frame containing 1 row for fold x free parameter instance.
columns include:
- 'data_name',
- 'method_name',
- 'free_parameter_name',
- 'free_parameter_value' (for each item in free parameter),
- training error,
- testing error,
- n_coefficients
:param grid_search_df:
:param n_coefficients: size of input dataset
:param invert_C: if C is a parameter, invert it (C = 1/lambda in l1 regression)
:return:
"""
train_score_df = (grid_search_df
.loc[:, filter(lambda x: 'train_score' in x and 'split' in x, grid_search_df.columns)]
.unstack()
.reset_index()
.rename(columns={'level_0': 'split_num', 0: 'train_score'})
.set_index('level_1')
.assign(split_num=lambda df: df.apply(lambda x: x['split_num'].replace('_train_score', ''), axis=1))
)
test_score_df = (grid_search_df
.loc[:, filter(lambda x: 'test_score' in x and 'split' in x, grid_search_df.columns)]
.unstack()
.reset_index()
.rename(columns={'level_0': 'split_num', 0: 'test_score'})
.set_index('level_1')
.assign(split_num=lambda df: df.apply(lambda x: x['split_num'].replace('_test_score', ''), axis=1)))
model_stats_df= pd.concat([train_score_df, test_score_df.drop('split_num', axis=1)], axis=1)
model_stats_df['dataname'] = settings['data_name']
param_df = (grid_search_df['params']
.apply(pd.Series))
if invert_C:
param_df['C'] = 1 / param_df['C']
param_df = (param_df.rename(
columns={col: 'param %d: %s' % (idx, col) for idx, col in enumerate(param_df.columns)})
).assign(key=grid_search_df['key'])
model_stats_df = (model_stats_df
.merge(param_df, left_index=True, right_index=True)
)
return model_stats_df.assign(n_coefficients=n_coefficients)
def get_flipset_solutions(model, data, action_set, mip_cost_type = 'max', scaler = None, print_flag = True):
"""
Run a basic audit of a model on the training dataset.
:param model:
:param data:
:param action_set:
:param mip_cost_type:
:param scaler:
:return:
"""
if scaler is not None:
yhat = model.predict(data['X_scaled'])
coefficients, intercept = undo_coefficient_scaling(coefficients= | np.array(model.coef_) | numpy.array |
""" This module is used to generate a 3d mesh based on a 2d section in
the xy-plane that is revolved around the x-axis. Note that only
quadratic elements are supported. For linear elements, Abaqus' builtin
routine works reasonably well (although the node coordinate accuracy
seem a bit low), see
:py:func:`~rollover.three_d.wheel.substructure.generate_3d_mesh`
"""
from __future__ import print_function
import numpy as np
from rollover.utils import naming_mod as names
def generate(wheel_model, mesh_size):
""" Based on a meshed 2d-profile of a wheel, generate a 3d-revolved
mesh with angular spacing such that the elements on the outer radius
have a circumferential size of mesh_size.
:param wheel_model: A model that contains a wheel part with a 2d
section mesh
:type wheel_model: Model object (Abaqus)
:param mesh_size: The mesh size to decide the angular increments
:type mesh_size: float
:returns: The wheel part and the angles for the element end planes
:type: tuple( Part object(Abaqus), np.array )
"""
wheel_part = wheel_model.parts[names.wheel_part]
# 1) Extract the 2d mesh
mesh_2d = get_2d_mesh(wheel_part)
# 2) Create the 3d-mesh
mesh_3d = make_3d_mesh_quad(mesh_2d, mesh_size)
# 3) Save the 3d-mesh to a part definition in an abaqus input file
input_file = save_3d_mesh_to_inp(mesh_3d)
# 4) Import the mesh. Delete the old part, and import the 3d mesh
del wheel_model.parts[names.wheel_part]
wheel_model.PartFromInputFile(inputFileName=input_file)
wheel_part = wheel_model.parts[names.wheel_part]
return wheel_part, mesh_3d['angles']
def get_2d_mesh(wheel_part):
""" Based on the wheel part, determine the 2d mesh information
:param wheel_part: The wheel part containing the 2d mesh
:type wheel_part: Part object (Abaqus)
:returns: Mesh specification with the following fields:
- nodes: np.array with node coordinates
- elements: dictionary with keys according to number of
nodes in element: N3,N4,N6,N8. Each item contains a list
of list of node labels
- edge_nodes: list of labels of nodes that belong to the
edges of the elements (and not the corners)
- corner_nodes: list of labels of nodes that belong to the
corners of the elements.
:rtype: dict
"""
node_coords = np.array([n.coordinates for n in wheel_part.nodes])
elements = {'N3': [], 'N4': [], 'N6': [], 'N8': []}
edge_nodes = []
corner_nodes = []
for e in wheel_part.elements:
enods = e.connectivity
num_enods = len(enods)
key = 'N' + str(num_enods)
if key in elements:
elements[key].append(enods)
else:
raise ValueError('Unknown element type with '
+ str(num_enods) + ' nodes.\n'
                             + '- Element label: ' + str(e.label) + '\n'
                             + '- Element nodes: ' + str(enods) + '\n'
                             + '- Element type : ' + str(e.type) + '\n')
if num_enods > 4: # 2nd order, second half of nodes on edges
            for n in enods[:num_enods//2]:
if n not in corner_nodes:
corner_nodes.append(n)
            for n in enods[num_enods//2:]:
if n not in edge_nodes:
edge_nodes.append(n)
else: # 1st order elements, all nodes at corners
for n in enods:
if n not in corner_nodes:
corner_nodes.append(n)
the_mesh = {'nodes': node_coords, 'elements': elements,
'edge_nodes': edge_nodes, 'corner_nodes': corner_nodes}
return the_mesh
def make_3d_mesh_quad(mesh_2d, mesh_size):
""" Revolve a 2d-mesh into a 3d-mesh
:param mesh_2d: Mesh specification with the following fields:
- nodes: np.array with node coordinates
- elements: dictionary with keys according to number
of nodes in element: N3,N4,N6,N8.
Each item contains a list of list of node labels
- edge_nodes: list of labels of nodes that belong to
the edges of the elements (and not the corners)
- corner_nodes: list of labels of nodes that belong
to the corners of the elements.
:type mesh_2d: dict
:param mesh_size: The circumferential mesh size at largest radius
:type mesh_size: float
:returns: Mesh specification with the following fields:
- nodes: np.array with node coordinates
- elements: dictionary with keys according to number
of nodes in element: N15, N20. Each item contains a list
of list of node labels
- angles: np.array of angles for angular increments of
elements.
:rtype: dict
"""
nodes_2d = mesh_2d['nodes']
elems_2d = mesh_2d['elements']
edge_node_num_2d = mesh_2d['edge_nodes']
corner_node_num_2d = mesh_2d['corner_nodes']
r_outer = np.max(np.abs(nodes_2d[:, 1]))
num_angles = int(r_outer*2*np.pi/mesh_size)
angles = np.linspace(0, 2*np.pi, num_angles+1)[:-1]
delta_angle = angles[1]-angles[0]
# Calculate size of mesh and allocate variables
num_corner_nodes_2d = len(corner_node_num_2d)
num_edge_nodes_2d = len(edge_node_num_2d)
num_nodes_per_section = 2*num_corner_nodes_2d + num_edge_nodes_2d
    nodes = np.zeros((num_nodes_per_section*num_angles, 3), dtype=float)
    corner_node_num = np.zeros((num_corner_nodes_2d, num_angles), dtype=int)
    edge_ip_node_num = np.zeros((num_edge_nodes_2d, num_angles), dtype=int)
    edge_op_node_num = np.zeros((num_corner_nodes_2d, num_angles), dtype=int)
edge_op_node_num[-1,-1] = -1 # Used the first iteration in the loop
for i, ang in enumerate(angles):
# Corner nodes
corner_node_num[:, i] = edge_op_node_num[-1,i-1] + 1 + np.arange(num_corner_nodes_2d)
for j, num in enumerate(corner_node_num[:,i]):
coords_2d = nodes_2d[corner_node_num_2d[j], :]
nodes[num, :] = rotate_coords(coords_2d, ang)
# Edge nodes (in plane)
edge_ip_node_num[:, i] = corner_node_num[-1,i] + 1 + np.arange(num_edge_nodes_2d)
for j, num in enumerate(edge_ip_node_num[:,i]):
coords_2d = nodes_2d[edge_node_num_2d[j], :]
nodes[num, :] = rotate_coords(coords_2d, ang)
# Edge nodes (out of plane, i.e. between angle increments,
# stemming from corner nodes in 2d)
edge_op_node_num[:, i] = edge_ip_node_num[-1,i] + 1 + np.arange(num_corner_nodes_2d)
for j, num in enumerate(edge_op_node_num[:,i]):
coords_2d = nodes_2d[corner_node_num_2d[j], :]
nodes[num, :] = rotate_coords(coords_2d, ang + delta_angle/2.0)
angle_inds = np.arange(num_angles+1)
angle_inds[-1] = 0
hex20_elems = get_elements(elems_2d['N8'], angle_inds, corner_node_num_2d,
edge_node_num_2d, corner_node_num, edge_ip_node_num,
edge_op_node_num)
wedge15_elems = get_elements(elems_2d['N6'], angle_inds, corner_node_num_2d,
edge_node_num_2d, corner_node_num, edge_ip_node_num,
edge_op_node_num)
mesh_3d = {'nodes': nodes,
'elements': {'N15': wedge15_elems, 'N20': hex20_elems},
'angles': angles}
return mesh_3d
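
# --- Illustrative sketch (not part of the original module): a minimal example of
# the angular-spacing rule used in make_3d_mesh_quad. The wheel radius and mesh
# size below are hypothetical; the rule is simply that the number of angular
# increments is the outer circumference divided by the requested circumferential
# element size.
def _demo_angular_spacing(r_outer=0.46, mesh_size=0.02):
    num_angles = int(r_outer*2*np.pi/mesh_size)
    angles = np.linspace(0, 2*np.pi, num_angles+1)[:-1]
    # Circumferential element size actually obtained at the outer radius:
    actual_size = r_outer*(angles[1] - angles[0])
    return num_angles, actual_size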
def get_elements(elem_2d_con, angle_inds, corner_node_num_2d, edge_node_num_2d,
corner_node_num, edge_ip_node_num, edge_op_node_num):
""" Get the node lists of the revolved elements belonging to a given
set of node lists of elements from the 2d mesh.
:param elem_2d_con: list of list of 2d nodes for each element
:type elem_2d_con: list[ list[ int ] ]
:param angle_inds: indices of angles, counting 0, 1, 2, ..., N, 0
:type angle_inds: np.array
:param corner_node_num_2d: node numbers of corner nodes from 2d
:type corner_node_num_2d: list[ int ]
:param edge_node_num_2d: node numbers for edge nodes from 2d
:type edge_node_num_2d: list[ int ]
:param corner_node_num: array of node numbers for corner nodes in
3d. First index refers to index in
corner_node_num_2d and second index to
angle_inds
:type corner_node_num: np.array( int )
:param edge_ip_node_num: array of node numbers for in-plane nodes in
3d. First index refers to index in
edge_node_num_2d and second to angle_inds
:type edge_ip_node_num: np.array( int )
:param edge_op_node_num: array of node numbers for out-of-plane
nodes in 3d. First index refers to index
in corner_node_num_2d and second to
angle_inds.
:type edge_op_node_num: np.array( int )
:returns: list of list containing element node labels for 3d mesh
:rtype: np.array
"""
elems = []
    n = len(elem_2d_con[0])//2
for enodes in elem_2d_con:
corner_rows = [corner_node_num_2d.index(node_num) for node_num in enodes[:n]]
edge_rows = [edge_node_num_2d.index(node_num) for node_num in enodes[n:]]
for i in range(len(angle_inds)-1):
elems.append([])
# Corner nodes
for j in range(2):
for cr in corner_rows:
elems[-1].append(corner_node_num[cr, angle_inds[i+(1-j)]])
# Edge nodes in plane
for j in range(2):
for er in edge_rows:
elems[-1].append(edge_ip_node_num[er, angle_inds[i+(1-j)]])
# Edge nodes between planes
for cr in corner_rows:
elems[-1].append(edge_op_node_num[cr, angle_inds[i]])
return np.array(elems)
def rotate_coords(coords, angles):
""" Rotate 2d coords in the xy-plane around the x-axis.
.. note::
The function supports either a list of coordinates or a list of
angles, not both at the same time
:param coords: Coordinates in xy-plane to be rotated. Can also
contain z-coordinate, but this is ignored.
Can be either a single coordinate, or 2d array. In
the latter case, the last index should give the axis,
i.e. size [N,2] or [N,3] where N is number of coords
:type coords: np.array
:param angles: List of angles to rotate a single coordinate with.
:type angles: float, int, list, np.array
:returns: An array of rotated coordinates: [N, 3], where N is number
of coordinates, i.e. N=max(len(angles), coords.shape[0])
:rtype: np.array
"""
if isinstance(angles, (float, int)):
rot_ang = [angles]
else:
rot_ang = angles
if len(coords.shape) == 1:
coords_rotated = np.zeros((len(rot_ang), 3))
coords_rotated[:,0] = coords[0]*np.ones((len(rot_ang)))
coords_rotated[:,1] = coords[1]*np.cos(rot_ang)
coords_rotated[:,2] = coords[1]*np.sin(rot_ang)
elif len(rot_ang) == 1:
coords_rotated = np.zeros((coords.shape[1], 3))
coords_rotated[:,0] = coords[:, 0]
        coords_rotated[:,1] = coords[:, 1]*np.cos(rot_ang[0])
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 18:25:54 2018
@author: paul
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
fs = 14
def plots(model):
# =============================================================================
# hystersis loop
# =============================================================================
fig, ax = plt.subplots()
time = model.time
x = model.x
dot_x = model.dot_x
Delta_x = model.Delta_x
distance = model.distance
car = 0
start = 0
end = model.iters
iters = end - start
    jump = 1 # plot only every jump-th iteration to save time
fs =14
c = np.linspace(model.time[start],model.time[end-1],iters)
#ax.set_title("velocity vs. headway, car=" + str(car))
ax_scatter = ax.scatter(Delta_x[car,start:end:jump],dot_x[car,start:end:jump],marker="x",s=10,c=c[::jump])
ax.set_xlabel('headway [m]', fontsize = fs)
ax.set_ylabel('velocity [s]',fontsize = fs)
#ax.set_ylim(0,10)
#ax.set_xlim(0,15)
ax.tick_params(direction="in")
ax.set_title(r'ANN, $L=$'+str(model.L))
cb=fig.colorbar(ax_scatter, ax=ax)
cb.set_label(label="time [s]",size=fs)
# =============================================================================
# trajectories
# =============================================================================
fig, ax = plt.subplots()
    for j in np.arange(0, model.N, jump):
import cvxpy as cvx
import numpy as np
from typing import Optional, Set, Dict, Any
from snc.agents.hedgehog.minimal_draining_time import compute_minimal_draining_time_from_workload \
as compute_min_drain_time
from snc.agents.hedgehog.strategic_idling.strategic_idling import StrategicIdlingOutput, \
StrategicIdlingCore
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedging import StrategicIdlingHedging
from snc.agents.hedgehog.strategic_idling.strategic_idling_utils import get_dynamic_bottlenecks, \
is_pull_model
from snc.utils.snc_types import WorkloadSpace, StateSpace
class StrategicIdlingHedgehogNaiveGTO(StrategicIdlingHedging):
"""
StrategicIdlingHedgehogNaiveGTO simply ensures that dynamic bottlenecks which determine
the minimum draining time in the network are not allowed to idle.
"""
def _is_switching_curve_regime(self, w: WorkloadSpace,
current_workload_variables: Dict[str, Any]) -> bool:
"""
        The switching-curve regime is detected by checking whether the workload at the
        minimum-cost effective state (w_star) corresponds to a longer minimum draining
        time than that of the current workload.
:param w: current state in workload space, i.e. w = Xi x.
:param current_workload_variables: dictionary of relevant variables in workload space.
It must contain w_star.
:return: bool
"""
assert not is_pull_model(self.model_type), \
f"Minimum draining time is computed assuming workload vectors with o_s = 1. " \
f"But current environment is: {self.model_type}."
w_star = current_workload_variables['w_star']
current_min_drain_time = compute_min_drain_time(w, self._load)
new_min_drain_time = compute_min_drain_time(w_star, self._load)
tol = 1e-3
assert new_min_drain_time >= current_min_drain_time - tol, \
"Something is wrong here! Idling a non-dynamic bottleneck shouldn't increase the " \
f"draining time, but it has increased by {new_min_drain_time - current_min_drain_time}."
return new_min_drain_time > current_min_drain_time
def _handle_switching_curve_regime(self, w: WorkloadSpace,
current_workload_vars: Dict[str, Any]) -> Dict[str, Any]:
"""
In case switching curve regime has been identified, this method finds
the current set of dynamic bottlenecks which determine the minimum draining
time and returns the strategic idling output with idling set of all bottlenecks
excluding the dynamic ones.
:param w: current state in workload space, i.e. w = Xi @ x.
        :param current_workload_vars: dictionary containing all the current workload-space variables.
:return: Strategic idling decision with k_idling set overriden.
"""
dynamic_bottlenecks = get_dynamic_bottlenecks(w, self._workload_mat, self._load)
k_idling_set = np.array([i for i in range(len(w)) if i not in dynamic_bottlenecks])
current_workload_vars['k_idling_set'] = k_idling_set
return current_workload_vars
def _verify_standard_hedging_regime(self, current_workload_vars: Dict[str, Any]) -> None:
"""
Method is called in order to check whether network is indeed facing a standard hedging
regime by computing the dot product between the drift vector and vector determining
the boundary of monotone region (psi_plus).
Positive dot product corresponds to standard hedging regime.
:param current_workload_vars: dictionary containing all the current workload-space vars.
"""
hedging_case = current_workload_vars['hedging_case']
if hedging_case == 'standard': # Method invalid if psi_plus is altered artificially.
psi_plus = current_workload_vars['psi_plus']
drift = (1 - self._load).reshape(-1, 1)
psi_drift_dot = psi_plus.T @ drift
eps = 1e-6 # numerical tolerance
assert psi_drift_dot > -eps
def get_allowed_idling_directions(self, state: StateSpace) -> StrategicIdlingOutput:
"""
Method returns idling decision corresponding to either standard hedging or
switching curve regimes.
:param state: current buffer state of the network.
:return: set of allowed idling resources with auxiliary variables
"""
w = self._workload_mat @ state
self._verify_offline_preliminaries()
if self._is_negative_orthant(w) and not self._is_1d_workload_relaxation(w):
idling_decision_dict = self._negative_workloads(w)
regime = "negative_workloads"
else:
current_workload_vars = self._non_negative_workloads(w)
if self._is_decision_not_to_idle(current_workload_vars['k_idling_set']):
idling_decision_dict = current_workload_vars
regime = "no_dling"
elif self._is_switching_curve_regime(w, current_workload_vars):
idling_decision_dict = self._handle_switching_curve_regime(w,
current_workload_vars)
regime = "switching_curve"
else:
idling_decision_dict = self._add_standard_hedging(w, current_workload_vars)
self._verify_standard_hedging_regime(idling_decision_dict)
regime = "standard_hedging"
idling_decision = self._get_null_strategic_idling_output(**idling_decision_dict)
if self.debug_info:
print(f"beta_star: {idling_decision.beta_star}, "
f"k_iling_set: {idling_decision.k_idling_set}, "
f"sigma_2_h: {idling_decision.sigma_2_h}, "
f"delta_h: {idling_decision.delta_h}, "
f"regime: {regime}")
return idling_decision
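
# --- Illustrative sketch (not part of the original class): how the switching-curve
# branch above turns a set of dynamic bottlenecks into an idling set. The workload
# vector and bottleneck set below are hypothetical; in the class this set comes
# from get_dynamic_bottlenecks(w, workload_mat, load).
def _demo_idling_set_from_dynamic_bottlenecks():
    w = np.array([3.0, 5.0, 2.0])   # toy workload vector, one entry per bottleneck
    dynamic_bottlenecks = {1}       # pretend bottleneck 1 sets the minimum draining time
    # Every bottleneck that is *not* dynamic is allowed to idle.
    k_idling_set = np.array([i for i in range(len(w)) if i not in dynamic_bottlenecks])
    return k_idling_set             # -> array([0, 2])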
class StrategicIdlingHedgehogGTO(StrategicIdlingHedgehogNaiveGTO):
"""
This class follows 'StrategicIdlingHedgehogNaiveGTO' but when encountering switching curve
regime adopts a more explicit GTO policy and tries to minimise the instantaneous cost
without compromising minimum draining time.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._current_dyn_bot_set: Optional[Set[int]] = None
self._min_drain_lp: Optional[cvx.Problem] = None
self._min_drain_x: Optional[cvx.Variable] = None
self._workload_critical_mat: Optional[cvx.Parameter] = None
self._workload_rest_mat: Optional[cvx.Parameter] = None
self._drain_time_rest_mat: Optional[cvx.Parameter] = None
self._w_critical: Optional[cvx.Parameter] = None
self._w_rest: Optional[cvx.Parameter] = None
self._min_drain_time: Optional[cvx.Parameter] = None
def _create_find_workload_with_min_draining_time_by_idling_lp_program(self):
num_resources, num_buffers = self._workload_mat.shape
n = num_resources
m = num_buffers
x_var = cvx.Variable((m, 1), nonneg=True) # Variable
workload_critical_mat = cvx.Parameter((n, m)) # workload matrix for dynamic bottlenecks
workload_rest_mat = cvx.Parameter((n, m)) # workload matrix for remaining bottlenecks
drain_time_rest_mat = cvx.Parameter((n, m)) # draining time matrix for remaining bottlenecks
w_critical = cvx.Parameter((n, 1)) # workload vector for dynamic bottlenecks
w_rest = cvx.Parameter((n, 1)) # workload vector for remaining bottlenecks
min_drain_time = cvx.Parameter(nonneg=True) # minimum draining time
w = w_critical + w_rest # the full workload vector
penalty_coeff_w_star = self.strategic_idling_params.penalty_coeff_w_star
objective = cvx.Minimize(
self._cost_per_buffer.T @ x_var
+ penalty_coeff_w_star * cvx.sum(self._workload_mat @ x_var - w))
constraints = [
# Don't idle critical bottlenecks.
workload_critical_mat @ x_var == w_critical,
# Reduce cost by idling (increasing workload).
workload_rest_mat @ x_var >= w_rest,
# Don't idle beyond the minimum draining time.
drain_time_rest_mat @ x_var <= min_drain_time]
lp_problem = cvx.Problem(objective, constraints)
self._min_drain_lp = lp_problem
self._min_drain_x = x_var
self._workload_critical_mat = workload_critical_mat
self._workload_rest_mat = workload_rest_mat
self._drain_time_rest_mat = drain_time_rest_mat
self._w_critical = w_critical
self._w_rest = w_rest
self._min_drain_time = min_drain_time
def _find_workload_with_min_draining_time_by_idling(self, w: WorkloadSpace) -> WorkloadSpace:
"""
This method first identifies a set of current dynamic bottlenecks which determine
the minimum draining time. If this set has changed from the previous call to the method,
the corresponding LP constraints for dynamic and remaining bottlenecks are updated.
LP is then solved to yield the target workload vector corresponding to minimum
effective cost subject to minimum draining time constraint.
:param w: current state in workload space, i.e. w = Xi x.
:return: w_min_drain, target workload vector
"""
dyn_bot_set = get_dynamic_bottlenecks(w, self._workload_mat, self._load)
ind_dyn_bot = np.array(list(dyn_bot_set))
# Update parameters of LP only when they change, i.e. when set of dynamic bottlenecks change
if self._current_dyn_bot_set is None or dyn_bot_set != self._current_dyn_bot_set:
            workload_critical_mat = np.zeros_like(self._workload_mat)
import math
import time
from typing import Tuple
import numpy as np
from pgdrive.utils.cutils import import_cutils
cutils = import_cutils()
number_pos_inf = float("inf")
number_neg_inf = float("-inf")
def safe_clip(array, min_val, max_val):
    array = np.nan_to_num(array.astype(float), copy=False, nan=0.0, posinf=max_val, neginf=min_val)
return np.clip(array, min_val, max_val).astype(np.float64)
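
# --- Illustrative sketch (not part of the original module): a minimal example of
# what safe_clip does with non-finite inputs. NaNs become 0 and +/-inf collapse to
# the clip bounds before the ordinary clip is applied.
def _demo_safe_clip():
    raw = np.array([0.5, np.nan, np.inf, -np.inf, 3.0])
    return safe_clip(raw, -1.0, 1.0)  # -> array([ 0.5,  0. ,  1. , -1. ,  1. ])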
def safe_clip_for_small_array(array, min_val, max_val):
array = list(array)
for i in range(len(array)):
if math.isnan(array[i]):
array[i] = 0.0
elif array[i] == number_pos_inf:
array[i] = max_val
elif array[i] == number_neg_inf:
array[i] = min_val
array[i] = clip(array[i], min_val, max_val)
return array
def wrap_to_pi(x: float) -> float:
return ((x + np.pi) % (2 * np.pi)) - np.pi
def get_vertical_vector(vector: np.array):
length = norm(vector[0], vector[1])
return (vector[1] / length, -vector[0] / length), (-vector[1] / length, vector[0] / length)
def time_me(fn):
def _wrapper(*args, **kwargs):
        start = time.perf_counter()
        fn(*args, **kwargs)
        print("%s cost %s seconds" % (fn.__name__, time.perf_counter() - start))
return _wrapper
def norm(x, y):
# return math.sqrt(x**2 + y**2)
return cutils.cutils_norm(x, y)
def distance_greater(vec1, vec2, length):
"""Return whether the distance between two vectors is greater than the given length."""
return ((vec1[0] - vec2[0])**2 + (vec1[1] - vec2[1])**2) > length**2
def clip(a, low, high):
# Since we clip all observation all the times. So adding a breakpoint in this function is really helpful!
# if a < low:
# print('Small Value')
# if a > high:
# print('Large Value')
# return min(max(a, low), high)
return cutils.cutils_clip(a, low, high)
def dot(a, b):
return a[0] * b[0] + a[1] * b[1]
def dot3(a, b):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def do_every(duration: float, timer: float) -> bool:
return duration < timer
def not_zero(x: float, eps: float = 1e-2) -> float:
if abs(x) > eps:
return x
elif x > 0:
return eps
else:
return -eps
def rotated_rectangles_intersect(rect1: Tuple, rect2: Tuple) -> bool:
"""
Do two rotated rectangles intersect?
:param rect1: (center, length, width, angle)
:param rect2: (center, length, width, angle)
:return: do they?
"""
return has_corner_inside(rect1, rect2) or has_corner_inside(rect2, rect1)
def point_in_rectangle(point, rect_min, rect_max) -> bool:
"""
Check if a point is inside a rectangle
:param point: a point (x, y)
:param rect_min: x_min, y_min
:param rect_max: x_max, y_max
"""
return rect_min[0] <= point[0] <= rect_max[0] and rect_min[1] <= point[1] <= rect_max[1]
def point_in_rotated_rectangle(point: np.ndarray, center: np.ndarray, length: float, width: float, angle: float) \
-> bool:
"""
Check if a point is inside a rotated rectangle
:param point: a point
:param center: rectangle center
:param length: rectangle length
:param width: rectangle width
:param angle: rectangle angle [rad]
:return: is the point inside the rectangle
"""
c, s = math.cos(angle), math.sin(angle)
r = np.array([[c, -s], [s, c]])
ru = r.dot(point - center)
return point_in_rectangle(ru, (-length / 2, -width / 2), (length / 2, width / 2))
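
# --- Illustrative sketch (not part of the original module): a small, hypothetical
# check of point_in_rotated_rectangle. A 4 x 2 rectangle centred at the origin and
# rotated by 90 degrees covers x in [-1, 1] and y in [-2, 2], so (0, 1.5) is inside
# while (1.5, 0) is not.
def _demo_point_in_rotated_rectangle():
    center = np.array([0.0, 0.0])
    inside = point_in_rotated_rectangle(np.array([0.0, 1.5]), center, 4.0, 2.0, np.pi / 2)
    outside = point_in_rotated_rectangle(np.array([1.5, 0.0]), center, 4.0, 2.0, np.pi / 2)
    return inside, outside  # -> (True, False)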
def has_corner_inside(rect1: Tuple, rect2: Tuple) -> bool:
"""
Check if rect1 has a corner inside rect2
:param rect1: (center, length, width, angle)
:param rect2: (center, length, width, angle)
"""
(c1, l1, w1, a1) = rect1
(c2, l2, w2, a2) = rect2
c1 = np.array(c1)
l1v = np.array([l1 / 2, 0])
w1v = np.array([0, w1 / 2])
r1_points = np.array([[0, 0], -l1v, l1v, -w1v, w1v, -l1v - w1v, -l1v + w1v, +l1v - w1v, +l1v + w1v])
c, s = math.cos(a1), math.sin(a1)
r = np.array([[c, -s], [s, c]])
rotated_r1_points = r.dot(r1_points.transpose()).transpose()
return any([point_in_rotated_rectangle(c1 + | np.squeeze(p) | numpy.squeeze |
import numpy as np
from scipy.integrate import cumtrapz
def imp_to_step(t_imp, y_imp):
    dt = np.median(np.diff(t_imp))
import torch
import numpy as np
from multiprocessing.pool import Pool
from collections import Counter
import shutil
import subprocess
import tifffile as tiff
import platform
import re
import os
def iou_pytorch(predictions, labels):
""" Online IoU-metric (batch-wise over all classes).
:param predictions: Batch of predictions.
:type predictions:
:param labels: Batch of ground truths / label images.
:type labels:
:param device: cuda (gpu) or cpu.
:type device:
:return: Intersection over union.
"""
a = predictions.clone().detach()
# Apply sigmoid activation function in one-class problems
a = torch.sigmoid(a)
# Flatten predictions and apply threshold
a = a.view(-1) > torch.tensor([0.5], requires_grad=False, device=a.device)
# Flatten labels
b = labels.clone().detach().view(-1).bool()
# Calculate intersection over union
intersection = torch.sum((a * b).float())
union = torch.sum(torch.max(a, b).float())
iou = intersection / (union + 1e-6)
return iou
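
# --- Illustrative sketch (not part of the original module): a tiny, hypothetical
# check of iou_pytorch. Logits above 0 become foreground after the sigmoid and the
# 0.5 threshold, so with three predicted foreground pixels overlapping two of the
# three labelled pixels the IoU is 2 / 4 = 0.5 (up to the 1e-6 smoothing term).
def _demo_iou_pytorch():
    predictions = torch.tensor([[4.0, 4.0, -4.0, 4.0]])   # raw network outputs (logits)
    labels = torch.tensor([[1.0, 1.0, 1.0, 0.0]])          # ground-truth mask
    return iou_pytorch(predictions, labels)                # -> tensor close to 0.5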
def metric_collection(prediction, ground_truth, num_threads=8, path_ctc_software=None):
""" Calculation of Rand-Index, Jaccard-Index, mean average precision at different intersection over union
thresholds (P_IoU), precision, recall, F-score and split/merged/missing/spurious objects.
:param prediction: Prediction with intensity coded nuclei.
:type prediction:
:param ground_truth: Ground truth image with intensity coded nuclei.
:type ground_truth:
    :param num_threads: Number of threads to speed up the computation.
:type num_threads: int
:return: Dictionary containing the metric scores.
"""
# Create copy of the prediction and ground truth to avoid changing them
pred, gt = np.copy(prediction), np.copy(ground_truth)
# Find intensity coded nuclei in the ground truth image and the prediction (simply looking for the maximum is not
# possible because in the post-processing numbered seeds can vanish, additionally for tracking data some nuclei
# may not appear at that time point)
nucleus_ids_ground_truth, nucleus_ids_prediction = get_nucleus_ids(gt), get_nucleus_ids(pred)
# Number of cell nuclei in the ground truth image and in the prediction
num_nuclei_ground_truth, num_nuclei_prediction = len(nucleus_ids_ground_truth), len(nucleus_ids_prediction)
# Check for empty predictions
if num_nuclei_prediction == 0:
return {'Rand_index': 0, 'Jaccard_index': 0, 'Aggregated_Jaccard_index': 0, 'P_IoU': 0, 'Precision': 0,
'Recall': 0, 'F-Score': 0, 'Split': 0, 'Merged': 0, 'Missing': num_nuclei_ground_truth, 'Spurious': 0
}, 0
# Check for missing nuclei ids in the prediction. To build the intersection histogram the nuclei_ids should range
# from 1 to the number of nuclei.
if num_nuclei_prediction != pred.max():
hist = np.histogram(pred, bins=range(1, pred.max() + 2), range=(1, pred.max() + 1))
# Find missing values
missing_values = np.where(hist[0] == 0)[0]
# Decrease the ids of the nucleus with higher id than the missing. Reverse the list to avoid problems in case
# of multiple missing objects
for th in reversed(missing_values):
pred[pred > th] = pred[pred > th] - 1
# Check for missing nuclei ids in the ground truth. To build the intersection histogram the nuclei_ids should range
# from 1 to the number of nuclei.
if num_nuclei_ground_truth != gt.max():
hist = np.histogram(gt, bins=range(1, gt.max() + 2), range=(1, gt.max() + 1))
# Find missing values
missing_values = np.where(hist[0] == 0)[0]
# Decrease the ids of the nucleus with higher id than the missing. Reverse the list to avoid problems in case
# of multiple missing objects
for th in reversed(missing_values):
gt[gt > th] = gt[gt > th] - 1
# Change the background label from 0 to num_nuclei + 1. This enables to calculate the intersection with the
# background efficiently.
bg_gt, bg_pred = num_nuclei_ground_truth + 1, num_nuclei_prediction + 1
pred[pred == 0] = bg_pred
gt[gt == 0] = bg_gt
nucleus_ids_ground_truth, nucleus_ids_prediction = get_nucleus_ids(gt), get_nucleus_ids(pred)
# Preallocate arrays for the intersection histogram
intersections = np.zeros(shape=(num_nuclei_ground_truth+1, num_nuclei_prediction+1), dtype=np.uint64)
# Create list to calculate the histogram entries in parallel
result_list = []
if (num_nuclei_prediction + 1) > num_threads:
fraction = (num_nuclei_prediction + 1) / num_threads # + 1 because the background is added
for i in range(num_threads):
result_list.append([pred,
gt,
nucleus_ids_prediction[int(i * fraction):int((i+1) * fraction)],
nucleus_ids_ground_truth])
else:
result_list.append([pred, gt, nucleus_ids_prediction, nucleus_ids_ground_truth])
# Calculate the intersection histogram entries in parallel
pool = Pool(num_threads)
intersection_hist_entries = pool.map(intersection_hist_col, result_list)
pool.close()
# Pack the intersection histogram column lists into a single list
for i in range(len(intersection_hist_entries)):
for j in range(len(intersection_hist_entries[i])):
col = intersection_hist_entries[i][j][0]
if col == bg_pred: # Move background column to the first
col = 0
intersections[:, col] = intersection_hist_entries[i][j][1]
# Calculate Rand index and Jaccard index
a, b, c, n = 0, 0, 0, len(prediction.flatten())
for i in range(intersections.shape[0]):
row_sum = np.sum(intersections[i, :], dtype=np.uint64)
b += row_sum * (row_sum - 1) / 2
for j in range(intersections.shape[1]):
if i == 0:
col_sum = np.sum(intersections[:, j], dtype=np.uint64)
c += col_sum * (col_sum - 1) / 2
a += intersections[i, j].astype(np.float64) * (intersections[i, j].astype(np.float64) - 1) / 2
b -= a
c -= a
d = n * (n - 1) / 2 - a - b - c
rand, jaccard = (a + d) / (a + b + c + d), (a + d) / (b + c + d)
# Match objects with maximum intersections to detect split, merged, missing and spurious objects
gt_matches, pred_matches, merged, missing, split, spurious = [], [], 0, 0, 0, 0
for i in range(intersections.shape[0]):
gt_matches.append(np.argmax(intersections[i, :]))
for j in range(intersections.shape[1]):
pred_matches.append(np.argmax(intersections[:, j]))
gt_matches_counts, pred_matches_counts = Counter(gt_matches), Counter(pred_matches)
for nucleus in gt_matches_counts:
if nucleus == 0 and gt_matches_counts[nucleus] > 1:
missing = gt_matches_counts[nucleus] - 1
elif nucleus != 0 and gt_matches_counts[nucleus] > 1:
merged += gt_matches_counts[nucleus] - 1
for nucleus in pred_matches_counts:
if nucleus == 0 and pred_matches_counts[nucleus] > 1:
spurious = pred_matches_counts[nucleus] - 1
elif nucleus != 0 and pred_matches_counts[nucleus] > 1:
split += pred_matches_counts[nucleus] - 1
# Aggregated Jaccard index and P_IoU (for the best IoU it does not matter if the predictions are matched to ground
# truth nuclei or the other way around since the lowest threshold used later is 0.5, for the Jaccard-index it does).
result_list = [] # Create list to find the best intersections and the corresponding unions in parallel
if len(gt_matches) > num_threads:
fraction = len(gt_matches) / num_threads
for i in range(num_threads):
result_list.append([pred, gt, intersections, list(range(int(i * fraction), int((i+1) * fraction)))])
else:
result_list.append([pred, gt, intersections, list(range(1, len(gt_matches)))])
pool = Pool(num_threads)
best_intersections_unions = pool.map(aggregated_iou_score, result_list)
pool.close()
aggregated_intersection, aggregated_union, used_nuclei_pred, iou = 0, 0, [], []
for i in range(len(best_intersections_unions)):
aggregated_intersection += best_intersections_unions[i][0]
aggregated_union += best_intersections_unions[i][1]
used_nuclei_pred = used_nuclei_pred + best_intersections_unions[i][2]
iou = iou + best_intersections_unions[i][3]
for nucleus in nucleus_ids_prediction[:-1]: # Exclude background
if nucleus not in used_nuclei_pred:
aggregated_union += np.sum(pred == nucleus)
aggregated_jaccard_index = aggregated_intersection / aggregated_union
# Preallocate arrays for true positives, false negatives and true positives for each IoU threshold
tp = np.zeros(shape=(10,), dtype=np.uint16)
fp = np.zeros(shape=(10,), dtype=np.uint16)
fn = np.zeros(shape=(10,), dtype=np.uint16)
# Count true positives, false positives and false negatives for different IoU-thresholds th
for i, th in enumerate(np.arange(0.5, 1.0, 0.05)):
matches = iou > th
# True positive: IoU > threshold
        tp[i] = np.count_nonzero(matches)
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
census=np.concatenate((new_record,data),axis=0)
print(census.shape)
age=census[:,0]
max_age=np.max(age)
min_age=np.min(age)
age_mean=np.round_(np.mean(age),2)
age_std=np.round_(np.std(age),2)
print(age,min_age,max_age,age_mean,age_std)
race_0=np.where(census[:,2]==0)[0]
print(race_0)
race_1=np.where(census[:,2]==1)[0]
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import streakline
#import streakline2
import myutils
import ffwd
from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior
#import streams
import astropy
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
import astropy.coordinates as coord
import gala.coordinates as gc
import scipy.linalg as la
import scipy.interpolate
import scipy.integrate
import scipy.spatial
import scipy.optimize
import zscale
import itertools
import copy
import pickle
# observers
# defaults taken as in astropy v2.0 icrs
mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
MASK = -9999
pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
#pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
class Stream():
def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'):
"""Initialize """
setup = {}
if progenitor['coords']=='galactocentric':
setup['x0'] = x0
setup['v0'] = v0
elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0):
if progenitor['pm_polar']:
a = v0[1].value
phi = v0[2].value
v0[1] = a*np.sin(phi)*u.mas/u.yr
v0[2] = a*np.cos(phi)*u.mas/u.yr
# convert positions
xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer'])
xgal = xeq.transform_to(coord.Galactocentric)
setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc
# convert velocities
setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun)
#setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s
else:
raise ValueError('Observer position needed!')
setup['dr'] = dr
setup['dv'] = dv
setup['minit'] = minit
setup['mfinal'] = mfinal
setup['rcl'] = rcl
setup['dt'] = dt
setup['age'] = age
setup['nstars'] = nstars
setup['integrator'] = integrator
setup['potential'] = potential
setup['pparams'] = pparams
self.setup = setup
self.setup_aux = {}
self.fill_intid()
self.fill_potid()
self.st_params = self.format_input()
def fill_intid(self):
"""Assign integrator ID for a given integrator choice
Assumes setup dictionary has an 'integrator' key"""
if self.setup['integrator']=='lf':
self.setup_aux['iaux'] = 0
elif self.setup['integrator']=='rk':
self.setup_aux['iaux'] = 1
def fill_potid(self):
"""Assign potential ID for a given potential choice
Assumes d has a 'potential' key"""
if self.setup['potential']=='nfw':
self.setup_aux['paux'] = 3
elif self.setup['potential']=='log':
self.setup_aux['paux'] = 2
elif self.setup['potential']=='point':
self.setup_aux['paux'] = 0
elif self.setup['potential']=='gal':
self.setup_aux['paux'] = 4
elif self.setup['potential']=='lmc':
self.setup_aux['paux'] = 6
elif self.setup['potential']=='dipole':
self.setup_aux['paux'] = 8
elif self.setup['potential']=='quad':
self.setup_aux['paux'] = 9
elif self.setup['potential']=='octu':
self.setup_aux['paux'] = 10
def format_input(self):
"""Format input parameters for streakline.stream"""
p = [None]*12
# progenitor position
p[0] = self.setup['x0'].si.value
p[1] = self.setup['v0'].si.value
# potential parameters
p[2] = [x.si.value for x in self.setup['pparams']]
# stream smoothing offsets
p[3] = [self.setup['dr'], self.setup['dv'].si.value]
# potential and integrator choice
p[4] = self.setup_aux['paux']
p[5] = self.setup_aux['iaux']
# number of steps and stream stars
p[6] = int(self.setup['age']/self.setup['dt'])
p[7] = int(p[6]/self.setup['nstars'])
# cluster properties
p[8] = self.setup['minit'].si.value
p[9] = self.setup['mfinal'].si.value
p[10] = self.setup['rcl'].si.value
# time step
p[11] = self.setup['dt'].si.value
return p
def generate(self):
"""Create streakline model for a stream of set parameters"""
#xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p)
stream = streakline.stream(*self.st_params)
self.leading = {}
self.leading['x'] = stream[:3]*u.m
self.leading['v'] = stream[6:9]*u.m/u.s
self.trailing = {}
self.trailing['x'] = stream[3:6]*u.m
self.trailing['v'] = stream[9:12]*u.m/u.s
def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None):
"""Observe the stream
stream.obs holds all observations
stream.err holds all errors"""
x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc
v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s
if mode=='cartesian':
# returns coordinates in following order
# x(x, y, z), v(vx, vy, vz)
if len(units)<2:
units.append(self.trailing['x'].unit)
units.append(self.trailing['v'].unit)
if len(errors)<2:
errors.append(0.2*u.kpc)
errors.append(2*u.km/u.s)
# positions
x = x.to(units[0])
ex = np.ones(np.shape(x))*errors[0]
ex = ex.to(units[0])
# velocities
v = v.to(units[1])
ev = np.ones(np.shape(v))*errors[1]
ev = ev.to(units[1])
self.obs = np.concatenate([x,v]).value
self.err = np.concatenate([ex,ev]).value
elif mode=='equatorial':
# assumes coordinates in the following order:
# ra, dec, distance, vrad, mualpha, mudelta
if len(units)!=6:
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
if len(errors)!=6:
errors = [0.2*u.deg, 0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr]
# define reference frame
xgal = coord.Galactocentric(x, **observer)
#frame = coord.Galactocentric(**observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, v, **vobs)
# store coordinates
ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
obs = np.hstack([ra, dec, dist, vr, mua, mud]).value
obs = np.reshape(obs,(6,-1))
if footprint=='sdss':
infoot = dec > -2.5*u.deg
obs = obs[:,infoot]
if np.allclose(rotmatrix, np.eye(3))!=1:
xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix)
obs[0] = xi
obs[1] = eta
self.obs = obs
# store errors
err = np.ones(np.shape(self.obs))
if logerr:
for i in range(6):
err[i] *= np.exp(errors[i].to(units[i]).value)
else:
for i in range(6):
err[i] *= errors[i].to(units[i]).value
self.err = err
self.obsunit = units
self.obserror = errors
# randomly select nstars from the stream
if nstars>-1:
if sequential:
select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int)
else:
select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars)
self.obs = self.obs[:,select]
self.err = self.err[:,select]
# include only designated dimensions
if len(present)>0:
self.obs = self.obs[present]
self.err = self.err[present]
self.obsunit = [ self.obsunit[x] for x in present ]
self.obserror = [ self.obserror[x] for x in present ]
def prog_orbit(self):
"""Generate progenitor orbital history"""
orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1)
self.orbit = {}
self.orbit['x'] = orbit[:3]*u.m
self.orbit['v'] = orbit[3:]*u.m/u.s
def project(self, name, N=1000, nbatch=-1):
"""Project the stream from observed to native coordinates"""
poly = np.loadtxt("../data/{0:s}_all.txt".format(name))
self.streak = np.poly1d(poly)
self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N)
self.streak_y = np.polyval(self.streak, self.streak_x)
self.streak_b = np.zeros(N)
self.streak_l = np.zeros(N)
pdot = np.polyder(poly)
for i in range(N):
length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,))
self.streak_l[i] = length[0]
XB = np.transpose(np.vstack([self.streak_x, self.streak_y]))
n = np.shape(self.obs)[1]
if nbatch<0:
nstep = 0
nbatch = -1
else:
nstep = np.int(n/nbatch)
i1 = 0
i2 = nbatch
for i in range(nstep):
XA = np.transpose(np.vstack([np.array(self.obs[0][i1:i2]), np.array(self.obs[1][i1:i2])]))
self.emdist(XA, XB, i1=i1, i2=i2)
i1 += nbatch
i2 += nbatch
XA = np.transpose(np.vstack([np.array(self.catalog['ra'][i1:]), np.array(self.catalog['dec'][i1:])]))
self.emdist(XA, XB, i1=i1, i2=n)
#self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header')
def emdist(self, XA, XB, i1=0, i2=-1):
""""""
distances = scipy.spatial.distance.cdist(XA, XB)
self.catalog['b'][i1:i2] = np.min(distances, axis=1)
imin = np.argmin(distances, axis=1)
self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[imin]] *= -1
self.catalog['l'][i1:i2] = self.streak_l[imin]
def _delta_path(self, x, pdot):
"""Return integrand for calculating length of a path along a polynomial"""
return np.sqrt(1 + np.polyval(pdot, x)**2)
def plot(self, mode='native', fig=None, color='k', **kwargs):
"""Plot stream"""
# Plotting
if fig==None:
plt.close()
plt.figure()
ax = plt.axes([0.12,0.1,0.8,0.8])
if mode=='native':
# Color setup
cindices = np.arange(self.setup['nstars']) # colors of stream particles
nor = mpl.colors.Normalize(vmin=0, vmax=self.setup['nstars']) # colormap normalization
plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3)
plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.xlabel("X (kpc)")
plt.ylabel("Z (kpc)")
elif mode=='observed':
plt.subplot(221)
plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Dec")
plt.subplot(223)
plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Distance")
plt.subplot(222)
plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\\alpha$")
plt.subplot(224)
plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\delta$")
plt.tight_layout()
#plt.minorticks_on()
def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}):
"""Read stream star positions from a file"""
t = np.loadtxt(fname).T
n = np.shape(t)[1]
ns = int((n-1)/2)
self.setup['nstars'] = ns
# progenitor
self.setup['x0'] = t[:3,0] * units['x']
self.setup['v0'] = t[3:,0] * units['v']
# leading tail
self.leading = {}
self.leading['x'] = t[:3,1:ns+1] * units['x']
self.leading['v'] = t[3:,1:ns+1] * units['v']
# trailing tail
self.trailing = {}
self.trailing['x'] = t[:3,ns+1:] * units['x']
self.trailing['v'] = t[3:,ns+1:] * units['v']
def save(self, fname):
"""Save stream star positions to a file"""
# define table
t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
# add progenitor info
t.add_row(np.ravel([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value]))
        # add leading tail info
tt = Table(np.concatenate((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vstack([t,tt])
# add trailing tail info
tt = Table(np.concatenate((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vstack([t,tt])
# save to file
t.write(fname, format='ascii.commented_header')
# make a streakline model of a stream
def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
"""Create a streakline model of a stream
baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
# vary progenitor parameters
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for i in range(3):
mock['x0'][i] += pparams0[26+i]
mock['v0'][i] += pparams0[29+i]
# vary potential parameters
potential = 'octu'
pparams = pparams0[:26]
#print(pparams[0])
pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
pparams[2] = (10**pparams0[2].value)*pparams0[2].unit
#pparams[0] = pparams0[0]*1e15
#pparams[2] = pparams0[2]*1e15
#print(pparams[0])
# adjust circular velocity in this halo
vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
# create a model stream with these parameters
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.generate()
stream.observe(**params['observe'])
################################
# Plot observed stream and model
if graph:
observed = load_stream(name)
Ndim = np.shape(observed.obs)[0]
modcol = 'k'
obscol = 'orange'
ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)']
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(12,4))
for i in range(3):
plt.sca(ax[i])
plt.gca().invert_xaxis()
plt.xlabel('R.A. (deg)')
plt.ylabel(ylabel[i])
plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream')
plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model')
if i==0:
plt.legend(frameon=False, handlelength=0.5, fontsize='small')
plt.tight_layout()
if graphsave:
plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150)
return stream
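# Usage sketch (illustrative only, not called by the pipeline): build the
# fiducial streakline model for a mock stream and compare it to the stored
# observations. Assumes the mock parameter file '../data/mock_gd1.params'
# exists, as required by stream_model above.
def example_stream_model(name='gd1'):
    """Minimal example of generating and plotting a fiducial stream model"""
    stream = stream_model(name=name, pparams0=pparams_fid, dt=0.2*u.Myr, graph=True, graphsave=False)
    # observed dimensions are (RA, Dec, d, Vr, mu_alpha, mu_delta)
    print('observables shape (Ndim x Nstar):', np.shape(stream.obs))
    return stream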
def progenitor_params(n):
"""Return progenitor parameters for a given stream"""
if n==-1:
age = 1.6*u.Gyr
mi = 1e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = gd1_coordinates(observer=mw_observer)
elif n==-2:
age = 2.7*u.Gyr
mi = 1e5*u.Msun
mf = 2e4*u.Msun
x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0)
elif n==-3:
age = 3.5*u.Gyr
mi = 5e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = tri_coordinates(observer=mw_observer)
elif n==-4:
age = 2*u.Gyr
mi = 2e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = atlas_coordinates(observer=mw_observer)
out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf}
return out
def gal2eq(x, v, observer=mw_observer, vobs=vsun0):
""""""
# define reference frame
xgal = coord.Galactocentric(np.array(x)[:,np.newaxis]*u.kpc, **observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, np.array(v)[:,np.newaxis]*u.km/u.s, **vobs)
# store coordinates
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
return(xobs, vobs)
def gd1_coordinates(observer=mw_observer):
"""Approximate GD-1 progenitor coordinates"""
x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-90, -250, -120]
return (x0, v0)
def pal5_coordinates(observer=mw_observer, vobs=vsun0):
"""Pal5 coordinates"""
# sdss
ra = 229.0128*u.deg
dec = -0.1082*u.deg
# bob's rrlyrae
d = 21.7*u.kpc
# harris
#d = 23.2*u.kpc
# odenkirchen 2002
vr = -58.7*u.km/u.s
# fritz & kallivayalil 2015
mua = -2.296*u.mas/u.yr
mud = -2.257*u.mas/u.yr
d = 24*u.kpc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer)
x0 = x.galactocentric
v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s)
return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist())
def tri_coordinates(observer=mw_observer):
"""Approximate Triangulum progenitor coordinates"""
x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-40, 155, 155]
return (x0, v0)
def atlas_coordinates(observer=mw_observer):
"""Approximate ATLAS progenitor coordinates"""
x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [40, 150, -120]
return (x0, v0)
# great circle orientation
def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True):
"""Save rotation matrix for a stream model"""
if stream is None:
stream = stream_model(name, pparams0=pparams, dt=dt)
# find the pole
ra = np.radians(stream.obs[0])
dec = np.radians(stream.obs[1])
rx = np.cos(ra) * np.cos(dec)
ry = np.sin(ra) * np.cos(dec)
rz = np.sin(dec)
r = np.column_stack((rx, ry, rz))
# fit the plane
x0 = np.array([0, 1, 0])
lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,))
x0 = lsq.x/np.linalg.norm(lsq.x)
ra0 = np.arctan2(x0[1], x0[0])
dec0 = np.arcsin(x0[2])
ra0 += np.pi
dec0 = np.pi/2 - dec0
# euler rotations
R0 = myutils.rotmatrix(np.degrees(-ra0), 2)
R1 = myutils.rotmatrix(np.degrees(dec0), 1)
R2 = myutils.rotmatrix(0, 2)
R = np.dot(R2, np.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
# put xi = 50 at the beginning of the stream
xi[xi>180] -= 360
xi += 360
xi0 = np.min(xi) - 50
R2 = myutils.rotmatrix(-xi0, 2)
R = np.dot(R2, np.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
if save:
np.save('../data/rotmatrix_{}'.format(name), R)
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
mock['rotmatrix'] = R
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
if graph:
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
plt.sca(ax[0])
plt.plot(stream.obs[0], stream.obs[1], 'ko')
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
plt.sca(ax[1])
plt.plot(xi, eta, 'ko')
plt.xlabel('$\\xi$ (deg)')
plt.ylabel('$\\eta$ (deg)')
plt.ylim(-5, 5)
plt.tight_layout()
plt.savefig('../plots/gc_orientation_{}.png'.format(name))
return R
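# Illustrative check (a sketch, assuming find_greatcircle has already saved a
# rotation matrix for this stream): rotate a fiducial model into its
# great-circle frame and confirm that the latitudes eta stay close to zero.
def check_greatcircle(name='gd1'):
    """Print the spread of eta after rotating a stream model to its great-circle frame"""
    R = np.load('../data/rotmatrix_{}.npy'.format(name))
    stream = stream_model(name=name, pparams0=pparams_fid)
    xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
    print('median |eta| = {:.2f} deg, max |eta| = {:.2f} deg'.format(np.median(np.abs(eta)), np.max(np.abs(eta))))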
def wfit_plane(x, r, p=None):
"""Fit a plane to a set of 3d points"""
Np = np.shape(r)[0]
if p is None:
p = np.ones(Np)
Q = np.zeros((3,3))
for i in range(Np):
Q += p[i]**2 * np.outer(r[i], r[i])
x = x/np.linalg.norm(x)
lsq = np.inner(x, np.inner(Q, x))
return lsq
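# Worked example (a sketch): wfit_plane returns the weighted quadratic form
# x^T Q x with Q = sum_i p_i^2 r_i r_i^T, so minimizing it over unit vectors x
# recovers the normal of the best-fit plane. Points drawn in the z=0 plane
# should therefore give a normal close to +/- (0, 0, 1).
def example_wfit_plane(N=100, seed=51):
    """Fit a plane to points drawn in the z=0 plane and print the recovered normal"""
    np.random.seed(seed)
    r = np.zeros((N, 3))
    r[:,0] = np.random.randn(N)
    r[:,1] = np.random.randn(N)
    lsq = scipy.optimize.minimize(wfit_plane, np.array([0.3, 0.3, 0.9]), args=(r,))
    normal = lsq.x / np.linalg.norm(lsq.x)
    print('recovered plane normal:', normal)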
# observed streams
#def load_stream(n):
#"""Load stream observations"""
#if n==-1:
#observed = load_gd1(present=[0,1,2,3])
#elif n==-2:
#observed = load_pal5(present=[0,1,2,3])
#elif n==-3:
#observed = load_tri(present=[0,1,2,3])
#elif n==-4:
#observed = load_atlas(present=[0,1,2,3])
#return observed
def endpoints(name):
""""""
stream = load_stream(name)
# find endpoints
amin = np.argmin(stream.obs[0])
amax = np.argmax(stream.obs[0])
ra = np.array([stream.obs[0][i] for i in [amin, amax]])
dec = np.array([stream.obs[1][i] for i in [amin, amax]])
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
# rotate endpoints
R = mock['rotmatrix']
xi, eta = myutils.rotate_angles(ra, dec, R)
#xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
mock['ra_range'] = ra
mock['xi_range'] = xi #np.percentile(xi, [10,90])
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
def load_pal5(present, nobs=50, potential='gal'):
""""""
if len(present)==2:
t = Table.read('../data/pal5_members.txt', format='ascii.commented_header')
dist = 21.7
deltadist = 0.7
np.random.seed(34)
t = t[np.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = np.random.randn(nobs)*deltadist + dist
obs = np.array([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.repeat( np.array([2e-4, 2e-4, 0.7]), nobs ).reshape(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==3:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_gd1(present, nobs=50, potential='gal'):
""""""
if len(present)==3:
t = Table.read('../data/gd1_members.txt', format='ascii.commented_header')
dist = 0
deltadist = 0.5
np.random.seed(34)
t = t[np.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = np.random.randn(nobs)*deltadist + dist
d += t['l']*0.04836 + 9.86
obs = np.array([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.repeat( np.array([2e-4, 2e-4, 0.5]), nobs ).reshape(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/gd1_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
ind = np.all(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs#[np.array(present)]
observed.obsunit = obsunit
observed.err = err#[np.array(present)]
observed.obserror = obserr
return observed
def load_tri(present, nobs=50, potential='gal'):
""""""
if len(present)==4:
t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
if len(present)==3:
t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
ind = np.all(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_atlas(present, nobs=50, potential='gal'):
""""""
ra, dec = atlas_track()
n = np.size(ra)
d = np.random.randn(n)*2 + 20
obs = np.array([ra, dec, d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([np.ones(n)*0.05, np.ones(n)*0.05, np.ones(n)*2])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def atlas_track():
""""""
ra0, dec0 = np.radians(77.16), np.radians(46.92 - 90)
# euler rotations
D = np.array([[np.cos(ra0), np.sin(ra0), 0], [-np.sin(ra0), np.cos(ra0), 0], [0, 0, 1]])
C = np.array([[np.cos(dec0), 0, np.sin(dec0)], [0, 1, 0], [-np.sin(dec0), 0, np.cos(dec0)]])
B = np.diag(np.ones(3))
R = np.dot(B, np.dot(C, D))
Rinv = np.linalg.inv(R)
l0 = np.linspace(0, 2*np.pi, 500)
b0 = np.zeros(500)
xeq, yeq, zeq = myutils.eq2car(l0, b0)
eq = np.column_stack((xeq, yeq, zeq))
eq_rot = np.zeros(np.shape(eq))
for i in range(np.size(l0)):
eq_rot[i] = np.dot(Rinv, eq[i])
l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2])
ra_s, dec_s = np.degrees(l0_rot), np.degrees(b0_rot)
ind_s = (ra_s>17) & (ra_s<30)
ra_s = ra_s[ind_s]
dec_s = dec_s[ind_s]
return (ra_s, dec_s)
def fancy_name(n):
"""Return nicely formatted stream name"""
names = {-1: 'GD-1', -2: 'Palomar 5', -3: 'Triangulum', -4: 'ATLAS'}
return names[n]
# model parameters
def get_varied_pars(vary):
"""Return indices and steps for a preset of varied parameters, and a label for varied parameters
Parameters:
vary - string or list of strings setting the parameter combination to be varied; options: 'potential', 'bary', 'halo', 'progenitor', 'dipole', 'quad', 'octu'"""
if type(vary) is not list:
vary = [vary]
Nt = len(vary)
vlabel = '_'.join(vary)
pid = []
dp = []
for v in vary:
o1, o2 = get_varied_bytype(v)
pid += o1
dp += o2
return (pid, dp, vlabel)
def get_varied_bytype(vary):
"""Get varied parameter of a particular type"""
if vary=='potential':
pid = [5,6,8,10,11]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun]
elif vary=='bary':
pid = [0,1,2,3,4]
# gd1
dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc]
## atlas & triangulum
#dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc]
# pal5
dp = [1e-2*u.Msun, 0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc]
dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
elif vary=='halo':
pid = [5,6,8,10]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
elif vary=='progenitor':
pid = [26,27,28,29,30,31]
dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr]
elif vary=='dipole':
pid = [11,12,13]
#dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)]
dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2]
elif vary=='quad':
pid = [14,15,16,17,18]
dp = [0.5*u.Gyr**-2 for x in range(5)]
elif vary=='octu':
pid = [19,20,21,22,23,24,25]
dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)]
else:
pid = []
dp = []
return (pid, dp)
def get_parlabel(pid):
"""Return label for a list of parameter ids
Parameter:
pid - list of parameter ids"""
master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$', '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$', '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$', '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$', '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$', '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ]
master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc', 'km/s', 'kpc', 'rad', '', '', '', 'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ]
if type(pid) is list:
labels = []
units = []
for i in pid:
labels += [master[i]]
units += [master_units[i]]
else:
labels = master[pid]
units = master_units[pid]
return (labels, units)
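# Usage sketch (illustrative only): list the parameter indices, labels, units
# and fiducial step sizes for a typical parameter combination.
def example_varied_pars(vary=['progenitor', 'bary', 'halo']):
    """Print the varied parameters and their fiducial steps for a parameter combination"""
    pid, dp, vlabel = get_varied_pars(vary)
    labels, units = get_parlabel(pid)
    for i, l, un, d in zip(pid, labels, units, dp):
        print('{:2d}  {:12s} [{}]  step {}'.format(i, l, un, d))
    print('combination label:', vlabel)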
def get_steps(Nstep=50, log=False):
"""Return deltax steps in both directions
Parameters:
Nstep - number of steps in one direction (default: 50)
log - if True, steps are logarithmically spaced (default: False)"""
if log:
step = np.logspace(-10, 1, Nstep)
else:
step = np.linspace(0.1, 10, Nstep)
step = np.concatenate([-step[::-1], step])
return (Nstep, step)
def lmc_position():
""""""
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d)
xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si]
print(xgal)
def lmc_properties():
""""""
# penarrubia 2016
mass = 2.5e11*u.Msun
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
c1 = coord.SkyCoord(ra=ra, dec=dec, distance=d)
cgal1 = c1.transform_to(coord.Galactocentric)
xgal = np.array([cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value])*u.kpc
return (mass, xgal)
# fit bspline to a stream model
def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit bspline to a stream model and save to file"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = np.argsort(stream.obs[0])
ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
np.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
fidsort = np.argsort(stream_fid.obs[0])
ra = np.linspace(np.min(stream_fid.obs[0])*1.05, np.max(stream_fid.obs[0])*0.95, Nobs)
tfid = np.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)]
llabel = 'b-spline fit'
else:
llabel = ''
plt.close()
fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim-1):
plt.sca(ax[0][i])
plt.plot(stream.obs[0], stream.obs[i+1], 'ko')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel)
if fiducial:
fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k)
plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1)
plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial')
plt.ylabel(ylabel[i+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[i][0], ylims[i][1])
plt.sca(ax[1][i])
if fiducial:
yref = fits_fid(stream.obs[0])
ycolor = 'b'
else:
yref = fits[i](stream.obs[0])
ycolor = 'r'
plt.axhline(0, color=ycolor, lw=2)
if fiducial: plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1)
plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko')
if fiducial:
fits_diff = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k)
plt.plot(stream.obs[0][isort], fits_diff(stream.obs[0][isort]), 'r--')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel)
plt.xlabel(ylabel[0])
plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].split(' ')[0]))
if fiducial:
plt.sca(ax[0][Ndim-2])
plt.legend(fontsize='small')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit each tail individually"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = np.argsort(stream.obs[0])
ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
np.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
plt.close()
fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim):
plt.sca(ax[0][i])
Nhalf = int(0.5*np.size(stream.obs[i]))
plt.plot(stream.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:], 'o')
if fiducial:
plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.ylabel(ylabel[i])
plt.sca(ax[1][i])
if fiducial:
plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o')
if fiducial:
plt.sca(ax[0][Ndim-1])
plt.legend(fontsize='small')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
else:
return fig
def get_stream_limits(n, align=False):
"""Return lists with limiting values in different dimensions"""
if n==-1:
xlims = [260, 100]
ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]]
elif n==-2:
xlims = [250, 210]
ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]]
elif n==-3:
xlims = [27, 17]
ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]]
elif n==-4:
xlims = [35, 10]
ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]]
if align:
ylims[0] = [-5, 5]
xup = [110, 110, 80, 80]
xlims = [xup[np.abs(n)-1], 40]
return (xlims, ylims)
# step sizes for derivatives
def iterate_steps(n):
"""Calculate derivatives for different parameter classes, and plot"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
step_convergence(n, Nstep=10, vary=vary)
choose_step(n, Nstep=10, vary=vary)
def iterate_plotsteps(n):
"""Plot stream models for a variety of model parameters"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
pid, dp, vlabel = get_varied_pars(vary)
for p in range(len(pid)):
plot_steps(n, p=p, Nstep=5, vary=vary, log=False)
def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun):
"""Plot stream for different values of a potential parameter"""
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
pparams0 = pparams_fid
pid, dp, vlabel = get_varied_pars(vary)
plabel, punit = get_parlabel(pid[p])
Nstep, step = get_steps(Nstep=Nstep, log=log)
plt.close()
fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]})
# fiducial model
stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs)
Nobs = 10
k = 3
isort = np.argsort(stream0.obs[0])
ra = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, Nobs)
t = np.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)]
fits = [None]*5
for j in range(5):
fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
color = mpl.cm.RdBu(i/(2*Nstep-1))
#print(i, dp[p], pparams)
# fits
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
plt.sca(ax[0][j])
plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2)
plt.sca(ax[1][j])
plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[2][j])
plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[3][j])
plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2)
# symmetric derivatives
ra_der = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, 100)
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx = -dy / np.abs(2*step[i]*dp[p])
plt.sca(ax[4][j])
plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i)
# labels, limits
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
for j in range(5):
plt.sca(ax[0][j])
plt.ylabel(ylabel[j+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[j][0], ylims[j][1])
plt.sca(ax[1][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
plt.sca(ax[2][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
plt.sca(ax[3][j])
plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].split(' ')[0], plabel))
plt.sca(ax[4][j])
plt.xlabel(ylabel[0])
plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].split(' ')[0], plabel))
#plt.suptitle('Varying {}'.format(plabel), fontsize='small')
plt.tight_layout()
plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep))
def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=np.nan, Nra=50):
"""Check deviations in numerical derivatives for consecutive step sizes"""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = np.eye(3)
xmm = mock['ra_range']
# fiducial model
pparams0 = pparams_fid
stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
if np.any(~np.isfinite(ra_der)):
ra_der = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra)
Nra = np.size(ra_der)
# parameters to vary
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
dpvec = np.array([x.value for x in dp])
Nstep, step = get_steps(Nstep=Nstep, log=log)
dydx_all = np.empty((Np, Nstep, 5, Nra))
dev_der = np.empty((Np, Nstep-2*layer))
step_der = np.empty((Np, Nstep-2*layer))
for p in range(Np):
plabel = get_parlabel(pid[p])
if verbose: print(p, plabel)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
if verbose: print(i, s)
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# fits
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
# symmetric derivatives
dydx = np.empty((Nstep, 5, Nra))
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx[i][j] = -dy / np.abs(2*step[i]*dp[p])
dydx_all[p] = dydx
# deviations from adjacent steps
step_der[p] = -step[layer:Nstep-layer] * dp[p]
for i in range(layer, Nstep-layer):
dev_der[p][i-layer] = 0
for j in range(5):
for l in range(layer):
dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i-l-1][j])**2)
dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i+l+1][j])**2)
np.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_all, steps_all=np.outer(dpvec,step[Nstep:]))
if graph:
plt.close()
fig, ax = plt.subplots(1,Np,figsize=(4*Np,4))
for p in range(Np):
plt.sca(ax[p])
plt.plot(step_der[p], dev_der[p], 'ko')
#plabel = get_parlabel(pid[p])
#plt.xlabel('$\Delta$ {}'.format(plabel))
plt.ylabel('D')
plt.gca().set_yscale('log')
plt.tight_layout()
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
plabels, units = get_parlabel(pid)
punits = ['({})'.format(x) if len(x) else '' for x in units]
t = np.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.npz'.format(name, vlabel, Nstep, log, layer))
dev = t['dev']
step = t['step']
dydx = t['ders']
steps_all = t['steps_all'][:,::-1]
Nra = np.shape(dydx)[-1]
best = np.empty(Np)
# plot setup
da = 4
nrow = 2
ncol = Np
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), squeeze=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]})
for p in range(Np):
# choose step
dmin = np.min(dev[p])
dtol = tolerance * dmin
opt_step = np.min(step[p][dev[p]<dtol])
opt_id = step[p]==opt_step
best[p] = opt_step
## largest step w deviation smaller than 1e-4
#opt_step = np.max(step[p][dev[p]<1e-4])
#opt_id = step[p]==opt_step
#best[p] = opt_step
plt.sca(ax[0][p])
for i in range(5):
for j in range(10):
plt.plot(steps_all[p], np.tanh(dydx[p,:,i,np.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5)
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.ylim(-1,1)
plt.ylabel('Derivative')
plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='small')
plt.sca(ax[1][p])
plt.plot(step[p], dev[p], 'ko')
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.plot(step[p][opt_id], dev[p][opt_id], 'ro')
plt.axhline(dtol, ls='-', color='orange', lw=1)
y0, y1 = plt.gca().get_ylim()
plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p]))
plt.ylabel('Derivative deviation')
np.save('../data/optimal_step_{}_{}'.format(name, vlabel), best)
plt.tight_layout(h_pad=0)
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def read_optimal_step(name, vary, equal=False):
"""Return optimal steps for a range of parameter types"""
if type(vary) is not list:
vary = [vary]
dp = np.empty(0)
for v in vary:
dp_opt = np.load('../data/optimal_step_{}_{}.npy'.format(name, v))
dp = np.concatenate([dp, dp_opt])
if equal:
dp = np.array([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01])
return dp
def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = np.eye(3)
xmm = mock['ra_range']
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
iexsort = np.argsort(fiducial.obs[0])
raex = np.linspace(np.percentile(fiducial.obs[0], 10), np.percentile(fiducial.obs[0], 90), Nobs)
tex = np.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)]
fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k)
nrow = 2
ncol = np.int64((Np+1)/nrow)
da = 4
c = ['b', 'b', 'b', 'r', 'r', 'r']
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), squeeze=False)
for p in range(Np):
plt.sca(ax[p%2][int(p/2)])
for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k)
plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i])
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
#print(get_parlabel(p))
plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium')
plt.tight_layout()
plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200)
# observing modes
def define_obsmodes():
"""Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes"""
obsmodes = {}
obsmodes['fiducial'] = {'sig_obs': np.array([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['binospec'] = {'sig_obs': np.array([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['hectochelle'] = {'sig_obs': np.array([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['desi'] = {'sig_obs': np.array([0.1, 2, 10, np.nan, np.nan]), 'Ndim': [4,]}
obsmodes['gaia'] = {'sig_obs': np.array([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]}
obsmodes['exgal'] = {'sig_obs': np.array([0.5, np.nan, 20, np.nan, np.nan]), 'Ndim': [3,]}
pickle.dump(obsmodes, open('../data/observing_modes.info','wb'))
def obsmode_name(mode):
"""Return full name of the observing mode"""
if type(mode) is not list:
mode = [mode]
full_names = {'fiducial': 'Fiducial',
'binospec': 'Binospec',
'hectochelle': 'Hectochelle',
'desi': 'DESI-like',
'gaia': 'Gaia-like',
'exgal': 'Extragalactic'}
keys = full_names.keys()
names = []
for m in mode:
if m in keys:
name = full_names[m]
else:
name = m
names += [name]
return names
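# Usage sketch (illustrative only; assumes the '../data' directory exists):
# write the observing-mode dictionary and read back the per-dimension
# uncertainties for one mode. The sig_obs entries correspond to the
# observables after R.A., i.e. presumably (Dec, d, Vr, mu_alpha, mu_delta),
# matching the j = 1..5 ordering used in calculate_crb below.
def example_obsmodes(mode='hectochelle'):
    """Print the typical uncertainties and data dimensionalities of an observing mode"""
    define_obsmodes()
    obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
    print(obsmode_name(mode)[0])
    print('sig_obs:', obsmodes[mode]['sig_obs'])
    print('data dimensionalities:', obsmodes[mode]['Ndim'])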
# crbs using bspline
def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=np.nan, dd=0.5, Nmin=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = np.sort(mock['xi_range'])
else:
rotmatrix = np.eye(3)
xmm = np.sort(mock['ra_range'])
# typical uncertainties and data availability
obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
if errmode not in obsmodes.keys():
errmode = 'fiducial'
sig_obs = obsmodes[errmode]['sig_obs']
data_dim = obsmodes[errmode]['Ndim']
# mock observations
if np.any(~np.isfinite(ra)):
if (np.int64((xmm[1]-xmm[0])/dd + 1) < Nmin):
dd = (xmm[1]-xmm[0])/Nmin
ra = np.arange(xmm[0], xmm[1]+dd, dd)
#ra = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs)
#else:
Nobs = np.size(ra)
print(name, Nobs)
err = np.tile(sig_obs, Nobs).reshape(Nobs,-1)
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)]
if scale:
dp_unit = unity_scale(dp)
dps = [x*y for x,y in zip(dp, dp_unit)]
# calculate derivatives for all parameters
for p in range(Np):
for i, s in enumerate([-1, 1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
for j in range(5):
fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
# populate matrix of derivatives and calculate CRB
for Ndim in data_dim:
#for Ndim in [6,]:
Ndata = Nobs * (Ndim - 1)
cyd = np.empty(Ndata)
dydx = np.empty((Np, Ndata))
dy2 = np.empty((2, Np, Ndata))
for j in range(1, Ndim):
for p in range(Np):
dy = fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra)
dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra)
dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra)
#positive = np.abs(dy)>0
#if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dy[positive])), np.max(np.abs(dy)), np.median(np.abs(dy))))
if scale:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dps[p].value)
else:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dp[p].value)
#if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dydx[p][(j-1)*Nobs:j*Nobs][positive])), np.max(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])), np.median(np.abs(dydx[p][(j-1)*Nobs:j*Nobs]))))
#print(j, p, get_parlabel(pid[p])[0], dp[p], np.min(np.abs(dy)), np.max(np.abs(dy)), np.median(dydx[p][(j-1)*Nobs:j*Nobs]))
cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2
np.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt)
# data component of the Fisher matrix
cy = np.diag(cyd)
cyi = np.diag(1. / cyd)
caux = np.matmul(cyi, dydx.T)
dxi = np.matmul(dydx, caux)
# component based on prior knowledge of model parameters
pxi = priors(name, vary)
# full Fisher matrix
cxi = dxi + pxi
if verbose:
cx = np.linalg.inv(cxi)
cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
sx = np.sqrt(np.diag(cx))
print('CRB', sx)
print('condition {:g}'.format(np.linalg.cond(cxi)))
print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
cx = stable_inverse(cxi)
print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
np.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi)
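# Toy illustration of the Fisher-matrix assembly used above (not part of the
# pipeline): for a derivative matrix dydx of shape (Npar, Ndata) and a
# diagonal data covariance cyd, the data term is F = dydx C^-1 dydx^T and the
# Cramer-Rao bound on each parameter is sqrt(diag(F^-1)).
def example_fisher_toy():
    """Compute the CRB for a toy two-parameter linear model y = a + b*x"""
    x = np.linspace(0, 10, 20)
    sig = 0.5
    dydx = np.array([np.ones_like(x), x])      # derivatives of y wrt (a, b)
    cyd = np.ones_like(x) * sig**2             # diagonal data covariance
    fisher = np.matmul(dydx, np.matmul(np.diag(1/cyd), dydx.T))
    crb = np.sqrt(np.diag(np.linalg.inv(fisher)))
    print('CRB on (a, b):', crb)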
def priors(name, vary):
"""Return covariance matrix with prior knowledge about parameters"""
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
cprog = mock['prog_prior']
cbary = np.array([0.1*x.value for x in pparams_fid[:5]])**-2
chalo = np.zeros(4)
cdipole = np.zeros(3)
cquad = np.zeros(5)
coctu = np.zeros(7)
priors = {'progenitor': cprog, 'bary': cbary, 'halo': chalo, 'dipole': cdipole, 'quad': cquad, 'octu': coctu}
cprior = np.empty(0)
for v in vary:
cprior = np.concatenate([cprior, priors[v]])
pxi = np.diag(cprior)
return pxi
def scale2invert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
#dp = read_optimal_step(name, vary)
d = np.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
dydx = d['dydx']
cyd = d['cyd']
y = d['y']
dp = d['dp']
dy = (y[1,:,:] - y[0,:,:])
dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,np.newaxis])
scaling_par = np.median(np.abs(dydx), axis=1)
dydx = dydx / scaling_par[:,np.newaxis]
dydx_ = np.reshape(dydx, (len(dp), Ndim-1, -1))
scaling_dim = np.median(np.abs(dydx_), axis=(2,0))
dydx_ = dydx_ / scaling_dim[np.newaxis,:,np.newaxis]
cyd_ = np.reshape(cyd, (Ndim-1, -1))
cyd_ = cyd_ / scaling_dim[:,np.newaxis]
cyd = np.reshape(cyd_, (-1))
dydx = np.reshape(dydx_, (len(dp), -1))
mmin = np.min(np.abs(dy), axis=0)
mmax = np.max(np.abs(dy), axis=0)
mmed = np.median(np.abs(dydx), axis=1)
dyn_range = mmax/mmin
#print(dyn_range)
print(np.min(dyn_range), np.max(dyn_range), np.std(dyn_range))
cy = np.diag(cyd)
cyi = np.diag(1. / cyd)
caux = np.matmul(cyi, dydx.T)
cxi = np.matmul(dydx, caux)
print('condition {:e}'.format(np.linalg.cond(cxi)))
cx = np.linalg.inv(cxi)
cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
cx = stable_inverse(cxi, maxiter=30)
print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
def unity_scale(dp):
""""""
dim_scale = 10**np.array([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4])
dim_scale = 10**np.array([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3])
#dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3])
#dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3])
dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))]
return dp_unit
def test_inversion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
d = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = d['cxi']
N = np.shape(cxi)[0]
cx_ = np.linalg.inv(cxi)
cx = stable_inverse(cxi, verbose=True, maxiter=100)
#cx_ii = stable_inverse(cx, verbose=True, maxiter=50)
print('condition {:g}'.format(np.linalg.cond(cxi)))
print('linalg inverse', np.allclose(np.matmul(cx_,cxi), np.eye(N)))
print('stable inverse', np.allclose(np.matmul(cx,cxi), np.eye(N)))
#print(np.matmul(cx,cxi))
#print('inverse inverse', np.allclose(cx_ii, cxi))
def stable_inverse(a, maxiter=20, verbose=False):
"""Invert a matrix with a bad condition number"""
N = np.shape(a)[0]
# guess
q = np.linalg.inv(a)
qa = np.matmul(q,a)
# iterate
for i in range(maxiter):
if verbose: print(i, np.sqrt(np.sum((qa - np.eye(N))**2)), np.allclose(qa, np.eye(N)))
if np.allclose(qa, np.eye(N)):
return q
qai = np.linalg.inv(qa)
q = np.matmul(qai,q)
qa = np.matmul(q,a)
return q
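# Usage sketch (illustrative only): build a symmetric matrix with a large
# condition number and compare the residual ||Q A - I|| of the plain numpy
# inverse with the iteratively refined one from stable_inverse.
def example_stable_inverse(N=10, seed=205):
    """Compare numpy and stable inversion of an ill-conditioned symmetric matrix"""
    np.random.seed(seed)
    q_, _ = np.linalg.qr(np.random.randn(N, N))
    w = 10.**np.linspace(-8, 2, N)             # eigenvalues spanning 10 decades
    a = np.matmul(q_ * w, q_.T)
    for label, ainv in [('numpy ', np.linalg.inv(a)), ('stable', stable_inverse(a, maxiter=50))]:
        res = np.sqrt(np.sum((np.matmul(ainv, a) - np.eye(N))**2))
        print('{:s} inverse residual: {:g}'.format(label, res))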
def crb_triangle(name='gd1', vary=['progenitor', 'bary', 'halo'], Ndim=6, align=True, plot='all', fast=False, errmode='fiducial'):
"""Show CRB correlations between a chosen set of parameters in a triangle plot, for a single data dimensionality"""
pid, dp, vlabel = get_varied_pars(vary)
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
if align:
alabel = '_align'
else:
alabel = ''
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
#print(cx[0][0])
if plot=='halo':
cx = cx[:4, :4]
params = params[:4]
elif plot=='bary':
cx = cx[4:9, 4:9]
params = params[4:9]
elif plot=='progenitor':
cx = cx[9:, 9:]
params = params[9:]
Nvar = len(params)
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2)
plt.gca().add_patch(e)
plt.gca().autoscale_view()
#plt.xlim(-ylim[i],ylim[i])
#plt.ylim(-ylim[j], ylim[j])
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.tight_layout()
plt.savefig('../plots/crb_triangle_{:s}_{:s}_{:s}_{:d}_{:s}.pdf'.format(alabel, name, vlabel, Ndim, plot))
def crb_triangle_alldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='all', fast=False, scale=False, errmode='fiducial'):
"""Show correlations in CRB between a chosen set of parameters in a triangle plot"""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
params = params[i0:i1]
if scale:
dp_unit = unity_scale(dp)
#print(dp_unit)
dp_unit = dp_unit[i0:i1]
pid = pid[i0:i1]
label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for l, Ndim in enumerate([3, 4, 6]):
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
#cxi = np.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npy'.format(errmode, Ndim, name, align, vlabel))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cx = cx[i0:i1,i0:i1]
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
plt.gca().add_patch(e)
if l==1:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot))
def compare_optimal_steps():
""""""
vary = ['progenitor', 'bary', 'halo', 'dipole', 'quad']
vary = ['progenitor', 'bary', 'halo']
for name in ['gd1', 'tri']:
print(name)
print(read_optimal_step(name, vary))
def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True):
""""""
if first:
store_progparams(name)
wrap_angles(name, save=True)
progenitor_prior(name)
find_greatcircle(name=name)
endpoints(name)
for v in vary:
step_convergence(name=name, Nstep=Nstep, vary=v)
choose_step(name=name, Nstep=Nstep, vary=v)
calculate_crb(name=name, vary=vary, verbose=True)
crb_triangle_alldim(name=name, vary=vary)
########################
# cartesian coordinates
# accelerations
def acc_kepler(x, p=1*u.Msun):
"""Keplerian acceleration"""
r = np.linalg.norm(x)*u.kpc
a = -G * p * 1e11 * r**-3 * x
return a.to(u.pc*u.Myr**-2)
def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]):
""""""
r = np.linalg.norm(x)*u.kpc
a = -(G*p[0]*x/(r * (r + p[1])**2)).to(u.pc*u.Myr**-2)
return a
def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]):
""""""
R = np.linalg.norm(x[:2])*u.kpc
z = x[2]
a = -(G*p[0]*x * (R**2 + (p[1] + np.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2)
a[2] *= (1 + p[2]/np.sqrt(z**2 + p[2]**2))
return a
def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]):
""""""
r = np.linalg.norm(x)*u.kpc
q = np.array([1*u.Unit(1), p[2], p[3]])
a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - np.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2)
return a
def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]):
"""Acceleration due to outside dipole perturbation"""
pv = [x.value for x in p]
a = np.sqrt(3/(4*np.pi)) * np.array([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2
return a
def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Acceleration due to outside quadrupole perturbation"""
a = np.zeros(3)*u.pc*u.Myr**-2
f = 0.5*np.sqrt(15/np.pi)
a[0] = x[0]*(f*p[4] - f/np.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3]
a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/np.sqrt(3)*p[2]) + x[2]*f*p[1]
a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/np.sqrt(3)*p[2]
return a.to(u.pc*u.Myr**-2)
def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Acceleration due to outside octupole perturbation"""
a = np.zeros(3)*u.pc*u.Myr**-2
f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
xu = x.unit
pu = p[0].unit
pvec = np.array([i.value for i in p]) * pu
dmat = np.ones((3,7)) * f * pvec * xu**2
x = np.array([i.value for i in x])
dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
a = np.einsum('ij->i', dmat) * dmat.unit
return a.to(u.pc*u.Myr**-2)
# derivatives
def der_kepler(x, p=1*u.Msun):
"""Derivative of Kepler potential parameters wrt cartesian components of the acceleration"""
r = np.linalg.norm(x)*u.kpc
dmat = np.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun
dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11
return dmat.value
def pder_kepler(x, p=1*u.Msun):
"""Derivative of cartesian components of the acceleration wrt to Kepler potential parameter"""
r = np.linalg.norm(x)*u.kpc
dmat = np.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1
dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11
return dmat.value
def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]):
"""Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters"""
p = pu
q = np.array([1, p[2], p[3]])
# physical quantities
r = np.linalg.norm(x)*u.kpc
a = acc_nfw(x, p=pu)
# derivatives
dmat = np.zeros((3, 4))
# Vh
dmat[:,0] = 2*a/p[0]
# Rh
dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2
# qy, qz
for i in [1,2]:
dmat[i,i+1] = (-2*a[i]/q[i]).value
return dmat
def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Hernquist bulge potential parameters"""
# coordinates
r = np.linalg.norm(x)*u.kpc
# accelerations
ab = acc_bulge(x, p=pu[:2])
# derivatives
dmat = np.zeros((3, 2))
# Mb
dmat[:,0] = ab/pu[0]
# ab
dmat[:,1] = 2 * ab / (r + pu[1])
return dmat
def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters"""
# coordinates
R = np.linalg.norm(x[:2])*u.kpc
z = x[2]
aux = np.sqrt(z**2 + pu[2]**2)
# accelerations
ad = acc_disk(x, p=pu)
# derivatives
dmat = np.zeros((3, 3))
# Md
dmat[:,0] = ad / pu[0]
# ad
dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2)
# bd
dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux
dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value
return dmat
def der_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of dipole potential parameters wrt (Cartesian) components of the acceleration vector a"""
# shape: 3, Npar
dmat = np.zeros((3,3))
f = np.sqrt((4*np.pi)/3)
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def pder_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of (Cartesian) components of the acceleration vector a wrt dipole potential parameters"""
# shape: 3, Npar
dmat = np.zeros((3,3))
f = np.sqrt(3/(4*np.pi))
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def der_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of quadrupole potential parameters wrt (Cartesian) components of the acceleration vector a"""
f = 2/np.sqrt(15/np.pi)
s = np.sqrt(3)
x = [1e-3/i.value for i in x]
dmat = np.ones((3,5)) * f
dmat[0] = np.array([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] = np.array([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] = np.array([0, x[1], 0.5*s*x[2], x[0], 0])
return dmat
def pder_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt quadrupole potential parameters"""
f = 0.5*np.sqrt(15/np.pi)
s = 1/np.sqrt(3)
x = [1e-3*i.value for i in x]
dmat = np.ones((3,5)) * f
dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] *= np.array([0, x[1], 2*s*x[2], x[0], 0])
return dmat
def pder_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt octupole potential parameters"""
f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
x = [1e-3*i.value for i in x]
dmat = np.ones((3,7)) * f
dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
return dmat
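# Consistency sketch (illustrative only, assuming the fiducial quadrupole
# coefficients pparams_fid[14:19] carry Gyr^-2 units as used above): compare
# the analytic derivatives pder_quad against central finite differences of
# acc_quad.
def check_pder_quad(dx=1e-4):
    """Print the maximum difference between analytic and finite-difference quadrupole derivatives"""
    x = np.array([15., 5., 10.])*u.kpc
    p0 = [pparams_fid[j] for j in range(14,19)]
    analytic = pder_quad(x, p=p0)
    numeric = np.zeros_like(analytic)
    for k in range(5):
        pp = [q for q in p0]
        pm = [q for q in p0]
        pp[k] = p0[k] + dx*p0[k].unit
        pm[k] = p0[k] - dx*p0[k].unit
        numeric[:,k] = (acc_quad(x, p=pp) - acc_quad(x, p=pm)).value / (2*dx)
    print('max |analytic - numeric| = {:g}'.format(np.max(np.abs(analytic - numeric))))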
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
"""Calculate CRB inverse matrix for 3D acceleration at position x in a halo potential"""
pid, dp, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# subset halo parameters
Nhalo = 4
cq = cx[:Nhalo,:Nhalo]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
xi = np.array([-8.3, 0.1, 0.1])*u.kpc
x0, v0 = gd1_coordinates()
#xi = np.array(x0)*u.kpc
d = 50
Nb = 20
x = np.linspace(x0[0]-d, x0[0]+d, Nb)
y = np.linspace(x0[1]-d, x0[1]+d, Nb)
x = np.linspace(-d, d, Nb)
y = np.linspace(-d, d, Nb)
xv, yv = np.meshgrid(x, y)
xf = np.ravel(xv)
yf = np.ravel(yv)
af = np.empty((Nb**2, 3))
plt.close()
fig, ax = plt.subplots(3,3,figsize=(11,10))
dimension = ['x', 'y', 'z']
xlabel = ['y', 'x', 'x']
ylabel = ['z', 'z', 'y']
for j in range(3):
if j==0:
xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T
elif j==1:
xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T
elif j==2:
xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T
for i in range(Nb**2):
#xi = np.array([xf[i], yf[i], x0[2]])*u.kpc
xi = xin[i]*u.kpc
a = acc_nfw(xi)
dqda = halo_accelerations(xi)
cai = np.matmul(dqda, np.matmul(cqi, dqda.T))
if fast:
ca = np.linalg.inv(cai)
else:
ca = stable_inverse(cai)
a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
af[i] = np.abs(a_crb/a)
af[i] = a_crb
for i in range(3):
plt.sca(ax[j][i])
im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1)
plt.xlabel(xlabel[j]+' (kpc)')
plt.ylabel(ylabel[j]+' (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("top", size="4%", pad=0.05)
plt.colorbar(im, cax=cax, orientation='horizontal')
plt.gca().xaxis.set_ticks_position('top')
cax.tick_params(axis='x', labelsize='xx-small')
if j==0:
plt.title('a$_{}$'.format(dimension[i]), y=4)
plt.tight_layout(rect=[0,0,1,0.95])
plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))
def acc_cart(x, components=['bary', 'halo', 'dipole']):
""""""
acart = np.zeros(3) * u.pc*u.Myr**-2
dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]}
accelerations = []
for c in components:
accelerations += dict_acc[c]
for acc in accelerations:
a_ = acc(x)
acart += a_
return acart
def acc_rad(x, components=['bary', 'halo', 'dipole']):
"""Return radial acceleration"""
r = np.linalg.norm(x) * x.unit
theta = np.arccos(x[2].value/r.value)
phi = np.arctan2(x[1].value, x[0].value)
trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
a_cart = acc_cart(x, components=components)
a_rad = np.dot(a_cart, trans)
return a_rad
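# Usage sketch (illustrative only): total baryonic + halo acceleration at a
# solar-like position and its radial projection, using the fiducial potential
# parameters pparams_fid assumed by the component accelerations above.
def example_acc_solar():
    """Print the cartesian and radial acceleration at a solar-like position"""
    x = np.array([-8.3, 0., 0.02])*u.kpc
    print('a   =', acc_cart(x, components=['bary', 'halo']))
    print('a_r =', acc_rad(x, components=['bary', 'halo']))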
def ader_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = np.empty((3,0))
dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = np.hstack((dacart, da_))
return dacart
def apder_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = np.empty((3,0))
dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = np.hstack((dacart, da_))
return dacart
def apder_rad(x, components=['bary', 'halo', 'dipole']):
"""Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x"""
r = np.linalg.norm(x) * x.unit
theta = np.arccos(x[2].value/r.value)
phi = np.arctan2(x[1].value, x[0].value)
trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
dadq_cart = apder_cart(x, components=components)
dadq_rad = np.einsum('ij,i->j', dadq_cart, trans)
return dadq_rad
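# Illustrative sketch of the error propagation used below: for a parameter
# covariance matrix cq, the covariance of the cartesian acceleration at x is
# C_a = (da/dq) cq (da/dq)^T, and its diagonal gives per-component
# uncertainties. The covariance values here are made up purely for
# illustration; the pipeline uses the CRB matrices computed above instead.
def example_acc_uncertainty():
    """Propagate a toy halo-parameter covariance to acceleration uncertainties at a test point"""
    x = np.array([20., 0., 20.])*u.kpc
    dadq = apder_cart(x, components=['halo'])            # shape (3, Npar)
    cq = np.diag([5.**2, 0.5**2, 0.02**2, 0.02**2])      # toy (Vh, Rh, qy, qz) covariance
    ca = np.matmul(dadq, np.matmul(cq, dadq.T))
    print('sigma_a (toy) =', np.sqrt(np.diag(ca)))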
def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = 3e-1
vmax = 1e1
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
x0, v0 = gd1_coordinates()
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Npix = np.size(xv)
af = np.empty((Npix, 3))
derf = np.empty((Npix, 3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i] = dadq
ca = np.matmul(dadq, np.matmul(cq, dadq.T))
a_crb = np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2
if relative:
af[i] = np.abs(a_crb/a)
else:
af[i] = a_crb
#print(xi, a_crb)
# save
np.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$']
for i in range(3):
plt.sca(ax[i])
im = plt.imshow(af[:,i].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=mpl.colors.LogNorm())
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i] + rlabel)
plt.tight_layout()
plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative))
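# The map above propagates the parameter covariance C_q into an acceleration covariance at each pixel,
# Sigma_a = (da/dq) C_q (da/dq)^T, and shows sqrt(diag(Sigma_a)) per Cartesian component, divided
# componentwise by |a| when relative=True.
# Hypothetical example call (stream index and options are placeholders):
# crb_acart(-1, vary=['progenitor', 'bary', 'halo'], component='halo', relative=True)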
def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', j=0, align=True, d=20, Nb=30, fast=False, scale=False, relative=True, progenitor=False, batch=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = -0.005
vmax = 0.005
#vmin = 1e-2
#vmax = 1e0
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()}
x0, v0 = prog_coords[n]
print(x0)
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Npix = np.size(xv)
af = np.empty((Npix, 3))
derf = np.empty((Npix*3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i*3:(i+1)*3] = dadq
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix*3
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
## check orthogonality:
#for i in range(Npot-1):
#for k in range(i+1, Npot):
#print(i, k)
#print(np.dot(vecs[:,i], vecs[:,k]))
#print(np.dot(vecs[::3,i], vecs[::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]))
# save
np.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
vmin = 1e-2
vmax = 5e0
norm = mpl.colors.LogNorm()
else:
vcomb = vecs[:,j]
label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
vmin = -0.025
vmax = 0.025
norm = None
for i in range(3):
plt.sca(ax[i])
#im = plt.imshow(vecs[i::3,j].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax)
im = plt.imshow(vcomb[i::3].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=norm)
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i])
plt.tight_layout()
if batch:
return fig
else:
plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor))
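# Because ca = derf C_q derf^T has rank at most Npot, only the top ~Npot eigenvalues returned by
# la.eigh are non-zero; for j=0 the plotted quantity combines all modes in quadrature,
# vcomb = sqrt(sum_k vals_k * vecs_k**2), while j!=0 shows an individual eigenvector.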
def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', d=20, Nb=10):
"""Plot acceleration field in R,z plane"""
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
x0 = np.array([4, 4, 0])
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
zin = xin[:,2]
Npix = np.size(xv)
acart_pix = np.empty((Npix, 3))
acyl_pix = np.empty((Npix, 2))
for i in range(Npix):
xi = xin[i]*u.kpc
acart = acc_cart(xi, components=components)
acart_pix[i] = acart
acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0])
acyl_pix[:,1] = acart_pix[:,2]
plt.close()
plt.figure()
plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1])
plt.tight_layout()
def a_crbcov_vecfield(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all', j=0, align=True, d=20, Nb=10, fast=False, scale=True, relative=False, progenitor=False, batch=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = -0.005
vmax = 0.005
#vmin = 1e-2
#vmax = 1e0
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
x0, v0 = gd1_coordinates()
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
zin = xin[:,2]
Npix = np.size(xv)
acart_pix = np.empty((Npix, 3))
acyl_pix = np.empty((Npix, 2))
vcomb_pix = np.empty((Npix, 2))
af = np.empty((Npix, 3))
derf = np.empty((Npix*3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
acart_pix[i] = a
dadq = apder_cart(xi, components=components)
derf[i*3:(i+1)*3] = dadq
acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0])
acyl_pix[:,1] = acart_pix[:,2]
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix*3
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
vmin = 1e-3
vmax = 1e-1
norm = mpl.colors.LogNorm()
else:
vcomb = vecs[:,j]*np.sqrt(vals[j])
label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
vmin = -0.025
vmax = 0.025
norm = None
vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(xin[:,0])
#vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(vcomb[0::3])
vcomb_pix[:,1] = vcomb[2::3]
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
plt.sca(ax[0])
plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1], pivot='middle')
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
plt.title('Acceleration {}'.format(component), fontsize='medium')
plt.sca(ax[1])
plt.quiver(Rin, zin, vcomb_pix[:,0], vcomb_pix[:,1], pivot='middle', headwidth=0, headlength=0, headaxislength=0, scale=0.02, scale_units='xy')
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
plt.title('Eigenvector {}'.format(np.abs(j)), fontsize='medium')
plt.tight_layout()
if batch:
return fig
else:
plt.savefig('../plots/afield_crbcov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative))
def summary(n, mode='scalar', vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
fn = {'scalar': crb_acart_cov, 'vector': a_crbcov_vecfield}
bins = {'scalar': 30, 'vector': 10}
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
Npars = {'bary': Nbary, 'halo': Nhalo, 'dipole': Ndipole, 'quad': Nquad, 'point': Npoint}
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
Niter = [Npars[x] for x in components]
Niter = sum(Niter) + 1
pp = PdfPages('../plots/acceleration_{}_{}_{}_{}_{}.pdf'.format(n, errmode, vlabel, component, mode))
for i in range(Niter):
print(i, Niter)
fig = fn[mode](-1, progenitor=True, batch=True, errmode=errmode, vary=vary, component=component, j=-i, d=20, Nb=bins[mode])
pp.savefig(fig)
pp.close()
#########
# Summary
def full_names():
""""""
full = {'gd1': 'GD-1', 'atlas': 'ATLAS', 'tri': 'Triangulum', 'ps1a': 'PS1A', 'ps1b': 'PS1B', 'ps1c': 'PS1C', 'ps1d': 'PS1D', 'ps1e': 'PS1E', 'ophiuchus': 'Ophiuchus', 'hermus': 'Hermus', 'kwando': 'Kwando', 'orinoco': 'Orinoco', 'sangarius': 'Sangarius', 'scamander': 'Scamander'}
return full
def full_name(name):
""""""
full = full_names()
return full[name]
def get_done(sort_length=False):
""""""
#done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'ophiuchus', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d']    # earlier selection, superseded below
done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d']
# length
if sort_length:
tosort = []
for name in done:
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
tosort += [np.max(mock['xi_range']) - np.min(mock['xi_range'])]
done = [x for _,x in sorted(zip(tosort,done))]
else:
tosort = []
vary = ['progenitor', 'bary', 'halo']
Ndim = 6
errmode = 'fiducial'
align = True
pid, dp_fid, vlabel = get_varied_pars(vary)
pid_vh = myutils.wherein(np.array(pid), np.array([5]))
for name in done:
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
crb = np.sqrt(np.diag(cx))
tosort += [crb[pid_vh]]
done = [x for _,x in sorted(zip(tosort,done))][::-1]
return done
def store_mocks():
""""""
done = get_done()
for name in done:
stream = stream_model(name)
np.save('../data/streams/mock_observed_{}'.format(name), stream.obs)
def period(name):
"""Return orbital period in units of stepsize and number of complete periods"""
orbit = stream_orbit(name=name)
r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
a = np.abs(np.fft.rfft(r))
f = np.argmax(a[1:]) + 1
p = np.size(a)/f
return (p, f)
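# The period estimate works in frequency space: the galactocentric distance r(t) is Fourier transformed,
# the strongest non-zero frequency bin f counts the completed radial oscillations over the orbit, and
# p = len(a)/f converts that back to a period expressed in samples of the (one-sided) spectrum.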
def extract_crbs(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', j=0, align=True, fast=False, scale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
tout = Table(names=('name', 'crb'))
pparams0 = pparams_fid
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
Np = len(pid_comp)
pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))
plt.close()
fig, ax = plt.subplots(Np,1,figsize=(10,15), subplot_kw=dict(projection='mollweide'))
for name in names[:]:
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
crb = np.sqrt(np.diag(cx))
#print([pparams0[pid_comp[i]] for i in range(Np)])
crb_frac = [crb[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)]
print(name, crb_frac)
stream = stream_model(name=name)
for i in range(Np):
plt.sca(ax[i])
color_index = np.array(crb_frac[:])
color_index[color_index>0.2] = 0.2
color_index /= 0.2
color = mpl.cm.viridis(color_index[i])
plt.plot(np.radians(stream.obs[0]), np.radians(stream.obs[1]), 'o', color=color, ms=4)
for i in range(Np):
plt.sca(ax[i])
#plt.xlabel('RA')
plt.ylabel('Dec')
plt.text(0.9, 0.9, '$\Delta$ {}'.format(get_parlabel(pid_comp[i])[0]), fontsize='medium', transform=plt.gca().transAxes, va='bottom', ha='left')
plt.grid()
plt.xlabel('RA')
# add custom colorbar
sm = plt.cm.ScalarMappable(cmap=mpl.cm.viridis, norm=plt.Normalize(vmin=0, vmax=20))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
if component=='bary':
cb_pad = 0.1
else:
cb_pad = 0.06
cb = fig.colorbar(sm, ax=ax.ravel().tolist(), pad=cb_pad, aspect=40, ticks=np.arange(0,21,5))
cb.set_label('Cramer $-$ Rao bounds (%)')
#plt.tight_layout()
plt.savefig('../plots/crb_onsky_{}.png'.format(component))
def vhrh_correlation(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', align=True):
""""""
names = get_done()
t = Table.read('../data/crb/ar_orbital_summary.fits')
N = len(names)
p = np.empty(N)
pid, dp_fid, vlabel = get_varied_pars(vary)
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
i = pid_comp[0]
j = pid_comp[1]
for e, name in enumerate(names):
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
p[e] = cx[i][j]/np.sqrt(cx[i][i]*cx[j][j])
plt.close()
plt.figure()
plt.plot(t['rapo'], p, 'ko')
def allstream_2d(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, relative=False):
"""Compare 2D constraints between all streams"""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
N = len(names)
# plot setup
ncol = np.int64(np.ceil(np.sqrt(N)))
nrow = np.int64(np.ceil(N/ncol))
w_ = 8
h_ = 1.1 * w_*nrow/ncol
alpha = 1
lw = 2
frac = [0.8, 0.5, 0.2]
# parameter pairs
paramids = [8, 11, 12, 13, 14]
all_comb = list(itertools.combinations(paramids, 2))
comb = sorted(list(set(all_comb)))
Ncomb = len(comb)
#print(comb)
pp = PdfPages('../plots/allstreams_2d_{}_a{:1d}_{}_r{:1d}.pdf'.format(errmode, align, vlabel, relative))
for c in range(Ncomb):
l, k = comb[c]
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(w_, h_), sharex=True, sharey=True)
for i in range(N):
plt.sca(ax[np.int64(i/ncol)][i%ncol])
for e, Ndim in enumerate([3,4,6]):
color = mpl.cm.bone(frac[e])
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, names[i], align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
cx_2d = np.array([[cx[k][k], cx[k][l]], [cx[l][k], cx[l][l]]])
if relative:
pk = pparams_fid[pid[k]].value
pl = pparams_fid[pid[l]].value
fid_2d = np.array([[pk**2, pk*pl], [pk*pl, pl**2]])
cx_2d = cx_2d / fid_2d * 100**2
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=color, alpha=alpha, lw=lw)
plt.gca().add_patch(e)
txt = plt.text(0.9, 0.9, full_name(names[i]), fontsize='small', transform=plt.gca().transAxes, ha='right', va='top')
txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))
if relative:
plt.xlim(-20, 20)
plt.ylim(-20,20)
else:
plt.gca().autoscale_view()
plabels, units = get_parlabel([pid[k],pid[l]])
if relative:
punits = [' (%)' for x in units]
else:
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
for i in range(ncol):
plt.sca(ax[nrow-1][i])
plt.xlabel(params[0])
for i in range(nrow):
plt.sca(ax[i][0])
plt.ylabel(params[1])
for i in range(N, ncol*nrow):
plt.sca(ax[np.int64(i/ncol)][i%ncol])
plt.axis('off')
plt.tight_layout(h_pad=0, w_pad=0)
pp.savefig(fig)
pp.close()
# circular velocity
def pder_vc(x, p=[pparams_fid[j] for j in [0,1,2,3,4,5,6,8,10]], components=['bary', 'halo']):
""""""
N = np.size(x)
# components
if 'bary' in components:
bulge = np.array([G*x*(x+p[1])**-2, -2*G*p[0]*x*(x+p[1])**-3])
aux = p[3] + p[4]
disk = np.array([G*x**2*(x**2 + aux**2)**-1.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5])
nfw = np.array([2*p[5]*(p[6]/x*np.log(1+x.value/p[6].value) - (1+x.value/p[6].value)**-1), p[5]**2*(np.log(1+x.value/p[6].value)/x - (x+p[6])**-1 - x*(x+p[6])**-2), np.zeros(N), np.zeros(N)])
pder = np.vstack([bulge, disk, nfw])
else:
pder = np.array([2*p[0]*(p[1]/x*np.log(1+x.value/p[1].value) - (1+x.value/p[1].value)**-1), p[0]**2*(np.log(1+x.value/p[1].value)/x - (x+p[1])**-1 - x*(x+p[1])**-2), np.zeros(N), np.zeros(N)])
return pder
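# The analytic partials above are of the circular velocity squared, Vc^2(r) = r dPhi/dr, with respect to
# the bulge, disk and halo parameters (the forms match Hernquist, Miyamoto-Nagai and NFW profiles);
# they play the same role as the acceleration derivatives elsewhere, feeding the propagation
# Sigma_vc = J C_q J^T used in delta_vc_vec below.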
def delta_vc_vec(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, fast=False, scale=False, ascale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
labels = full_names()
colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}
#colors = {'gd1': mpl.cm.bone(0), 'atlas': mpl.cm.bone(0.5), 'tri': mpl.cm.bone(0.8)}
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
for name in names:
# read in full inverse CRB for stream modeling
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
x = np.linspace(0.01, d, Nb)*u.kpc
Npix = np.size(x)
derf = np.transpose(pder_vc(x, components=components))
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Nb
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
#label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
else:
vcomb = vecs[:,j]*np.sqrt(vals[j])
#label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
mcomb = (vcomb*u.km**2*u.s**-2 * x / G).to(u.Msun)
vc_true = vcirc_potential(x, pparams=pparams_fid)
# relate to orbit
orbit = stream_orbit(name=name)
r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
rmin = np.min(r)
rmax = np.max(r)
rcur = r[0]
r0 = r[-1]
print(name, rcur, r0)
e = (rmax - rmin)/(rmax + rmin)
l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
p, Np = period(name)
np.savez('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dvc=np.sqrt(vcomb), vc=vc_true.value, r=x.value, rperi=rmin, rapo=rmax, rcur=rcur, r0=r0, ecc=e, l=l, p=p, Np=Np)
if ascale:
x = x * rmax**-1
#x = x * rcur**-1
# plot
plt.sca(ax[0])
plt.plot(x, np.sqrt(vcomb), '-', lw=3, color=colors[name], label=labels[name])
#plt.plot(x, vc_true, 'r-')
plt.sca(ax[1])
plt.plot(x, np.sqrt(vcomb)/vc_true, '-', lw=3, color=colors[name], label=labels[name])
#plt.plot(x, mcomb, '-', lw=3, color=colors[name], label=labels[name])
plt.sca(ax[0])
if ascale:
plt.xlim(0,5)
plt.xlabel('r/r$_{apo}$')
else:
plt.xlabel('r (kpc)')
plt.ylabel('$\Delta$ $V_c$ (km s$^{-1}$)')
#plt.ylim(0, 100)
plt.sca(ax[1])
plt.legend(loc=1, frameon=True, handlelength=1, fontsize='small')
if ascale:
plt.xlim(0,5)
plt.xlabel('r/r$_{apo}$')
else:
plt.xlabel('r (kpc)')
plt.ylabel('$\Delta$ $V_c$ / $V_c$')
#plt.ylabel('$\Delta$ $M_{enc}$ ($M_\odot$)')
#plt.ylim(0, 1e11)
plt.tight_layout()
plt.savefig('../plots/vc_r_summary_apo{:d}.pdf'.format(ascale))
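# mcomb converts the propagated uncertainty in Vc^2 into an enclosed-mass uncertainty through
# M(<r) = Vc^2 r / G; the corresponding plot is kept commented out in favor of Delta Vc / Vc.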
def delta_vc_correlations(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, r=False, fast=False, scale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
elabel = ''
ylabel = 'min ($\Delta$ $V_c$ / $V_c$)'
if r:
ylabel = 'r(min($\Delta$ $V_c$ / $V_c$)) (kpc)'
elabel = 'r'
names = get_done()
labels = full_names()
colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}
plt.close()
fig, ax = plt.subplots(2,3,figsize=(15,9))
for name in names:
d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
rel_dvc = np.min(d['dvc'] / d['vc'])
if r:
idmin = np.argmin(d['dvc'] / d['vc'])
rel_dvc = d['r'][idmin]
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])
plt.sca(ax[0][0])
if r:
plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5)
plt.plot(d['rapo'], rel_dvc, 'o', ms=10, color=colors[name], label=labels[name])
plt.xlabel('$r_{apo}$ (kpc)')
plt.ylabel(ylabel)
plt.sca(ax[0][1])
#plt.plot(d['rcur']/d['rapo'], rel_dvc, 'o', ms=10, color=colors[name])
if r:
plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5)
plt.plot(d['rcur'], rel_dvc, 'o', ms=10, color=colors[name])
#plt.plot(d['r0'], rel_dvc, 'ro')
plt.xlabel('$r_{current}$')
plt.ylabel(ylabel)
plt.sca(ax[0][2])
#ecc = np.sqrt(1 - (d['rperi']/d['rapo'])**2)    # superseded by the stored value below
ecc = d['ecc']
plt.plot(ecc, rel_dvc, 'o', ms=10, color=colors[name], label=labels[name])
plt.xlabel('Eccentricity')
plt.ylabel(ylabel)
plt.sca(ax[1][0])
plt.plot(np.median(np.abs(d['l'][:,2])/np.linalg.norm(d['l'], axis=1)), rel_dvc, 'o', ms=10, color=colors[name])
plt.xlabel('|L_z|/|L|')
plt.ylabel(ylabel)
plt.sca(ax[1][1])
plt.plot(d['Np'], rel_dvc, 'o', ms=10, color=colors[name])
#plt.xlabel('$r_{peri}$ (kpc)')
plt.xlabel('Completed periods')
plt.ylabel(ylabel)
plt.sca(ax[1][2])
plt.plot(dlambda, rel_dvc, 'o', ms=10, color=colors[name])
plt.xlabel('$\Delta$ $\\xi$ (deg)')
plt.ylabel(ylabel)
plt.sca(ax[0][2])
plt.legend(fontsize='small', handlelength=0.1)
plt.tight_layout()
plt.savefig('../plots/delta_vc{}_correlations.pdf'.format(elabel))
def collate_orbit(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True):
"""Store all of the properties on streams"""
pid, dp_fid, vlabel = get_varied_pars(vary)
names = get_done()
N = len(names)
Nmax = len(max(names, key=len))
tname = np.chararray(N, itemsize=Nmax)
vcmin = np.empty(N)
r_vcmin = np.empty(N)
Labs = np.empty((N,3))
lx = np.empty(N)
ly = np.empty(N)
lz = np.empty(N)
Lmod = np.empty(N)
period = np.empty(N)
Nperiod = np.empty(N)
ecc = np.empty(N)
rperi = np.empty(N)
rapo = np.empty(N)
rcur = np.empty(N)
length = np.empty(N)
for e, name in enumerate(names[:]):
d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
idmin = np.argmin(d['dvc'] / d['vc'])
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])
tname[e] = name
vcmin[e] = (d['dvc'] / d['vc'])[idmin]
r_vcmin[e] = d['r'][idmin]
if e==0:
Nr = np.size(d['r'])
dvc = np.empty((N, Nr))
vc = np.empty((N, Nr))
r = np.empty((N, Nr))
dvc[e] = d['dvc']
vc[e] = d['dvc'] / d['vc']
r[e] = d['r']
Labs[e] = np.median(np.abs(d['l']), axis=0)
Lmod[e] = np.median(np.linalg.norm(d['l'], axis=1))
lx[e] = np.abs(np.median(d['l'][:,0]/np.linalg.norm(d['l'], axis=1)))
ly[e] = np.abs(np.median(d['l'][:,1]/np.linalg.norm(d['l'], axis=1)))
lz[e] = np.abs(np.median(d['l'][:,2]/np.linalg.norm(d['l'], axis=1)))
period[e] = d['p']
Nperiod[e] = d['Np']
ecc[e] = d['ecc']
rperi[e] = d['rperi']
rapo[e] = d['rapo']
rcur[e] = d['rcur']
length[e] = dlambda
t = Table([tname, vcmin, r_vcmin, dvc, vc, r, Labs, Lmod, lx, ly, lz, period, Nperiod, length, ecc, rperi, rapo, rcur], names=('name', 'vcmin', 'rmin', 'dvc', 'vc', 'r', 'Labs', 'Lmod', 'lx', 'ly', 'lz', 'period', 'Nperiod', 'length', 'ecc', 'rperi', 'rapo', 'rcur'))
t.pprint()
t.write('../data/crb/vc_orbital_summary.fits', overwrite=True)
# radial acceleration
def ar_r(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, Nsight=1, seed=39):
"""Calculate precision in radial acceleration as a function of galactocentric radius"""
np.random.seed(seed)
pid, dp_fid, vlabel = get_varied_pars(vary)
components = [c for c in vary if c!='progenitor']
names = get_done()
N = len(names)
Nmax = len(max(names, key=len))
tname = np.chararray(N, itemsize=Nmax)
armin = np.empty((N, Nsight))
r_armin = np.empty((N, Nsight))
Labs = np.empty((N,3))
lx = np.empty(N)
ly = np.empty(N)
lz = np.empty(N)
Lmod = np.empty(N)
period_ = np.empty(N)
Nperiod = np.empty(N)
ecc = np.empty(N)
rperi = np.empty(N)
rapo = np.empty(N)
rcur = np.empty(N)
length = np.empty(N)
Npix = 300
r = np.linspace(0.1, 200, Npix)
dar = np.empty((N, Nsight, Npix))
ar = np.empty((N, Nsight, Npix))
rall = np.empty((N, Nsight, Npix))
plt.close()
fig, ax = plt.subplots(1,3, figsize=(15,5))
for e, name in enumerate(names[:]):
# read in full inverse CRB for stream modeling
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cx = stable_inverse(cxi)
cq = cx[6:,6:]
Npot = np.shape(cq)[0]
# relate to orbit
orbit = stream_orbit(name=name)
ro = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
rmin = np.min(ro)
rmax = np.max(ro)
rcur_ = ro[0]
r0 = ro[-1]
e_ = (rmax - rmin)/(rmax + rmin)
l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
p, Np = period(name)
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for s in range(Nsight):
if Nsight==1:
# single sightline
x0 = mock['x0']
xeq = coord.SkyCoord(ra=x0[0], dec=x0[1], distance=x0[2])
xg = xeq.transform_to(coord.Galactocentric)
rg = np.linalg.norm(np.array([xg.x.value, xg.y.value, xg.z.value]))
theta = np.arccos(xg.z.value/rg)
phi = np.arctan2(xg.y.value, xg.x.value)
else:
u_ = np.random.random(1)
v_ = np.random.random(1)
theta = np.arccos(2*u_ - 1)
phi = 2 * np.pi * v_
xin = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)]).T
arad_pix = np.empty((Npix, 1))
af = np.empty(Npix)
derf = np.empty((Npix, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_rad(xi, components=components)
af[i] = a
dadq = apder_rad(xi, components=components)
derf[i] = dadq
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
# store
idmin = np.argmin(vcomb / np.abs(af))
armin[e][s] = (vcomb / np.abs(af))[idmin]
r_armin[e][s] = r[idmin]
dar[e][s] = vcomb
ar[e][s] = vcomb / np.abs(af)
rall[e][s] = r
dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])
tname[e] = name
Labs[e] = np.median(np.abs(l), axis=0)
Lmod[e] = np.median(np.linalg.norm(l, axis=1))
lx[e] = np.abs(np.median(l[:,0]/np.linalg.norm(l, axis=1)))
ly[e] = np.abs(np.median(l[:,1]/np.linalg.norm(l, axis=1)))
lz[e] = np.abs(np.median(l[:,2]/np.linalg.norm(l, axis=1)))
period_[e] = p
Nperiod[e] = Np
ecc[e] = e_
rperi[e] = rmin
rapo[e] = rmax
rcur[e] = rcur_
length[e] = dlambda
t = Table([tname, armin, r_armin, dar, ar, rall, Labs, Lmod, lx, ly, lz, period_, Nperiod, length, ecc, rperi, rapo, rcur], names=('name', 'armin', 'rmin', 'dar', 'ar', 'r', 'Labs', 'Lmod', 'lx', 'ly', 'lz', 'period', 'Nperiod', 'length', 'ecc', 'rperi', 'rapo', 'rcur'))
t.pprint()
t.write('../data/crb/ar_orbital_summary_{}_sight{:d}.fits'.format(vlabel, Nsight), overwrite=True)
plt.tight_layout()
def plot_ar(current=False, vary=['progenitor', 'bary', 'halo'], Nsight=1):
"""Explore constraints on radial acceleration, along the progenitor line"""
pid, dp_fid, vlabel = get_varied_pars(vary)
t = Table.read('../data/crb/ar_orbital_summary_{}_sight{:d}.fits'.format(vlabel, Nsight))
N = len(t)
#fapo = t['rapo']/np.max(t['rapo'])    # superseded by the fixed normalization below
fapo = t['rapo']/100
flen = t['length']/(np.max(t['length']) + 10)
fcolor = fapo
plt.close()
fig, ax = plt.subplots(1, 4, figsize=(20,5))
for i in range(N):
color = mpl.cm.bone(fcolor[i])
lw = flen[i] * 5
plt.sca(ax[0])
plt.plot(t['r'][i][0], t['ar'][i][1], '-', color=color, lw=lw)
plt.xlabel('R (kpc)')
plt.ylabel('$\Delta$ $a_r$ / $a_r$')
plt.ylim(0, 3.5)
armin = np.median(t['armin'], axis=1)
armin_err = 0.5 * (np.percentile(t['armin'], 84, axis=1) - np.percentile(t['armin'], 16, axis=1))
rmin = np.median(t['rmin'], axis=1)
rmin_err = 0.5 * (np.percentile(t['rmin'], 84, axis=1) - np.percentile(t['rmin'], 16, axis=1))
plt.sca(ax[1])
plt.scatter(t['length'], armin, c=fcolor, cmap='bone', vmin=0, vmax=1)
plt.errorbar(t['length'], armin, yerr=armin_err, color='k', fmt='none', zorder=0)
plt.xlabel('Length (deg)')
plt.ylabel('min $\Delta$ $a_r$')
plt.ylim(0, 3.5)
plt.sca(ax[2])
a = np.linspace(0,90,100)
plt.plot(a, a, 'k-')
#plt.plot(a, 2*a, 'k--')
#plt.plot(a, 3*a, 'k:')
plt.scatter(t['rcur'], rmin, c=fcolor, cmap='bone', vmin=0, vmax=1)
plt.errorbar(t['rcur'], rmin, yerr=rmin_err, color='k', fmt='none', zorder=0)
plt.xlabel('$R_{cur}$ (kpc)')
plt.ylabel('$R_{min}$ (kpc)')
#for i in range(len(t)):
#plt.text(t['rcur'][i], rmin[i]+5, t['name'][i], fontsize='small')
plt.xlim(0,90)
plt.ylim(0,90)
plt.sca(ax[3])
a = np.linspace(0,90,100)
plt.plot(a, a, 'k-')
#plt.plot(a, 2*a, 'k--')
#plt.plot(a, 3*a, 'k:')
plt.scatter(t['rapo'], rmin, c=fcolor, cmap='bone', vmin=0, vmax=1)
plt.errorbar(t['rapo'], rmin, yerr=rmin_err, color='k', fmt='none', zorder=0)
plt.xlabel('$R_{apo}$ (kpc)')
plt.ylabel('$R_{min}$ (kpc)')
plt.xlim(0,90)
plt.ylim(0,90)
plt.tight_layout()
plt.savefig('../plots/ar_crb_{}_sight{:d}.pdf'.format(vlabel, Nsight))
# save stream constraints
tout = Table([t['name'], t['rapo'], t['rcur'], t['length'], rmin, rmin_err, armin, armin_err], names=('name', 'rapo', 'rcur', 'length', 'rmin', 'rmin_err', 'armin', 'armin_err'))
tout.write('../data/ar_constraints_{}_sight{}.fits'.format(vlabel, Nsight), overwrite=True)
def plot_all_ar(Nsight=50):
"""Explore constraints on radial acceleration, along the progenitor line"""
# earlier 4-model setup, superseded by the 3-model setup below
#alist = [0.2, 0.4, 0.7, 1]
#mslist = [11, 9, 7, 5]
#lwlist = [8, 6, 4, 2]
#fc = [0.8, 0.6, 0.4, 0.2]
#vlist = [['progenitor', 'bary', 'halo'], ['progenitor', 'bary', 'halo', 'dipole'], ['progenitor', 'bary', 'halo', 'dipole', 'quad'], ['progenitor', 'bary', 'halo', 'dipole', 'quad', 'octu']]
#labels = ['Fiducial Galaxy', '+ dipole', '++ quadrupole', '+++ octupole']
alist = [0.2, 0.55, 1]
#mslist = [11, 8, 5]
mslist = [13, 10, 7]
#lwlist = [8, 5, 2]
lwlist = [9, 6, 3]
fc = [0.8, 0.5, 0.2]
vlist = [['progenitor', 'bary', 'halo'], ['progenitor', 'bary', 'halo', 'dipole', 'quad'], ['progenitor', 'bary', 'halo', 'dipole', 'quad', 'octu']]
labels = ['Fiducial Galaxy', '++ quadrupole', '+++ octupole']
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(13.5,4.5))
for e, vary in enumerate(vlist):
pid, dp_fid, vlabel = get_varied_pars(vary)
t = Table.read('../data/crb/ar_orbital_summary_{}_sight{:d}.fits'.format(vlabel, Nsight))
N = len(t)
color = mpl.cm.viridis(fc[e])
lw = lwlist[e]
ms = mslist[e]
alpha = alist[e]
plt.sca(ax[0])
for i in range(0,5,4):
plt.plot(t['r'][i][0], t['ar'][i][1], '-', color=color, lw=lw, alpha=alpha)
plt.xlabel('r (kpc)')
plt.ylabel('$\Delta$ $a_r$ / $a_r$')
plt.ylim(0, 3.5)
armin = np.median(t['armin'], axis=1)
armin_err = 0.5 * (np.percentile(t['armin'], 84, axis=1) - np.percentile(t['armin'], 16, axis=1))
rmin = np.median(t['rmin'], axis=1)
rmin_err = 0.5 * (np.percentile(t['rmin'], 84, axis=1) - np.percentile(t['rmin'], 16, axis=1))
# fit exponential
p = np.polyfit(t['length'], np.log(armin), 1)
print(1/p[0], np.exp(p[1]))
poly = np.poly1d(p)
x_ = np.linspace(np.min(t['length']), np.max(t['length']), 100)
y_ = poly(x_)
plt.sca(ax[1])
plt.plot(x_, np.exp(y_), '-', color=color, alpha=alpha, lw=lw, label='')
plt.plot(t['length'], armin, 'o', color=color, ms=ms, alpha=alpha, label=labels[e])
plt.errorbar(t['length'], armin, yerr=armin_err, color=color, fmt='none', zorder=0, alpha=alpha)
#plt.plot(t['length'], np.log(armin), 'o', color=color, ms=ms, alpha=alpha, label=labels[e])
#plt.errorbar(t['length'], np.log(armin), yerr=np.log(armin_err), color=color, fmt='none', zorder=0, alpha=alpha)
if e==len(vlist)-1:
plt.legend(loc=1, fontsize='small', handlelength=0.5, frameon=False)
plt.xlabel('Stream length (deg)')
plt.ylabel('min $\Delta$ $a_r$')
plt.ylim(0, 3.5)
plt.sca(ax[2])
a = np.linspace(0,90,100)
plt.plot(a, a, 'k-', alpha=0.4)
plt.plot(t['rcur'], rmin, 'o', color=color, ms=ms, alpha=alpha)
plt.errorbar(t['rcur'], rmin, yerr=rmin_err, color=color, fmt='none', zorder=0, alpha=alpha)
plt.xlabel('$R_{cur}$ (kpc)')
plt.ylabel('$R_{min}$ (kpc)')
plt.xlim(0,90)
plt.ylim(0,90)
#plt.sca(ax[3])
#a = np.linspace(0,90,100)
#plt.plot(a, a, 'k-')
#plt.plot(t['rapo'], rmin, 'o', color=color, ms=ms, alpha=alpha)
#plt.errorbar(t['rapo'], rmin, yerr=rmin_err, color=color, fmt='none', zorder=0, alpha=alpha)
#plt.xlabel('$R_{apo}$ (kpc)')
#plt.ylabel('$R_{min}$ (kpc)')
#plt.xlim(0,90)
#plt.ylim(0,90)
plt.tight_layout()
plt.savefig('../plots/ar_crb_all_sight{:d}.pdf'.format(Nsight))
plt.savefig('../paper/ar_crb_all.pdf')
def ar_multi(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, Nsight=1, seed=39, verbose=True):
"""Calculate precision in radial acceleration as a function of galactocentric radius for multiple streams"""
np.random.seed(seed)
pid, dp_fid, vlabel = get_varied_pars(vary)
components = [c for c in vary if c!='progenitor']
Npar = len(pid)
names = get_done()
N = len(names)
Nmax = len(max(names, key=len))
armin = np.empty((N, Nsight))
r_armin = np.empty((N, Nsight))
Npix = 300
r = np.linspace(0.1, 200, Npix)
dar = np.empty((N, Nsight, Npix))
ar = np.empty((N, Nsight, Npix))
rall = np.empty((N, Nsight, Npix))
plt.close()
fig, ax = plt.subplots(1,1, figsize=(8,6))
plt.sca(ax)
for k in range(N):
names_in = [names[x] for x in range(k+1)]
if verbose: print(k, names_in)
cxi_all = np.zeros((Npar, Npar))
for e, name in enumerate(names_in):
# read in full inverse CRB for stream modeling
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
cxi_all = cxi_all + cxi
cx_all = stable_inverse(cxi_all)
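# Adding the Fisher (inverse-covariance) matrices of individual streams assumes their datasets are
# independent; the joint parameter covariance for the first k+1 streams is then the inverse of the sum,
# which is what cx_all holds here.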
cq = cx_all[6:,6:]
Npot = np.shape(cq)[0]
for s in range(Nsight):
if Nsight==1:
# single sightline
mock = pickle.load(open('../data/mock_{}.params'.format('gd1'), 'rb'))
x0 = mock['x0']
xeq = coord.SkyCoord(ra=x0[0], dec=x0[1], distance=x0[2])
xg = xeq.transform_to(coord.Galactocentric)
rg = np.linalg.norm(np.array([xg.x.value, xg.y.value, xg.z.value]))
theta = np.arccos(xg.z.value/rg)
phi = np.arctan2(xg.y.value, xg.x.value)
else:
u_ = np.random.random(1)
v_ = np.random.random(1)
theta = np.arccos(2*u_ - 1)
phi = 2 * np.pi * v_
xin = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)]).T
arad_pix = np.empty((Npix, 1))
af = np.empty(Npix)
derf = np.empty((Npix, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_rad(xi, components=components)
af[i] = a
dadq = apder_rad(xi, components=components)
derf[i] = dadq
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
# store
idmin = np.argmin(vcomb / np.abs(af))
armin[k][s] = (vcomb / np.abs(af))[idmin]
r_armin[k][s] = r[idmin]
dar[k][s] = vcomb
ar[k][s] = vcomb / np.abs(af)
rall[k][s] = r
plt.plot(rall[k][s], ar[k][s]*100, '-', color=mpl.cm.viridis_r(k/12.), lw=2)
t = Table([armin, r_armin, dar, ar, rall], names=('armin', 'rmin', 'dar', 'ar', 'r'))
t.pprint()
t.write('../data/crb/ar_multistream{}_{}_sight{:d}.fits'.format(N, vlabel, Nsight), overwrite=True)
plt.xlabel('r (kpc)')
plt.ylabel('$\Delta$ $a_r$ / $a_r$ (%)')
plt.ylim(0,100)
# add custom colorbar
sm = plt.cm.ScalarMappable(cmap=mpl.cm.viridis_r, norm=plt.Normalize(vmin=1, vmax=12))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes('right', size='4%', pad=0.05)
#cb = fig.colorbar(sm, ax=cax, pad=0.1, aspect=40, ticks=np.arange(1,13,3))
cb = plt.colorbar(sm, cax=cax, ticks=np.arange(1,13,3))
cb.set_label('Number of streams')
plt.tight_layout()
plt.savefig('../plots/ar_multistream{}_{}_sight{:d}.png'.format(N, vlabel, Nsight))
# flattening
def delta_q(q='x', Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', j=0, align=True, fast=False, scale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
kq = {'x': 0, 'z': 2}
iq = {'x': 2, 'z': 3}
labelq = {'x': '$_x$', 'z': '$_z$'}
component = 'halo'
pparams0 = pparams_fid
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
Np = len(pid_comp)
pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))
names = get_done()
labels = full_names()
colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}
plt.close()
fig, ax = plt.subplots(1,3,figsize=(15,5))
for name in names:
#for n in [-1,]:
# read in full inverse CRB for stream modeling
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
crb_all = np.sqrt(np.diag(cx))
crb = [crb_all[pid_crb[i]] for i in range(Np)]
crb_frac = [crb_all[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)]
delta_q = crb[iq[q]]
## choose the appropriate components:
#Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
#if 'progenitor' not in vary:
#Nprog = 0
#nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
#nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
#if 'progenitor' not in vary:
#nstart['dipole'] = Npoint
#nend['dipole'] = Npoint + Ndipole
#if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
#components = [component]
#else:
#components = [x for x in vary if x!='progenitor']
#cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
#if ('progenitor' not in vary) & ('bary' not in vary):
#cq = cx
#Npot = np.shape(cq)[0]
#if scale:
#dp_opt = read_optimal_step(n, vary)
#dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
#dp_unit = unity_scale(dp)
#scale_vec = np.array([x.value for x in dp_unit[nstart[component]:nend[component]]])
#scale_mat = np.outer(scale_vec, scale_vec)
#cqi /= scale_mat
#delta_q = np.sqrt(cq[iq[q], iq[q]])
# relate to orbit
orbit = stream_orbit(name=name)
r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
rmin = np.min(r)
rmax = np.max(r)
#e = (rmax - rmin)/(rmax + rmin)    # eccentricity; superseded by the axis ratio below
e = rmin/rmax
l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
ltheta = np.median(l[:,kq[q]]/np.linalg.norm(l, axis=1))
langle = np.degrees(np.arccos(ltheta))
sigltheta = np.std(l[:,kq[q]]/np.linalg.norm(l, axis=1))
plt.sca(ax[0])
plt.plot(e, delta_q, 'o', color=colors[name], label=labels[name])
plt.sca(ax[1])
plt.plot(sigltheta, delta_q, 'o', color=colors[name], label=labels[name])
plt.sca(ax[2])
plt.plot(np.abs(ltheta), delta_q, 'o', color=colors[name], label=labels[name])
plt.sca(ax[0])
plt.legend(frameon=False, handlelength=1, fontsize='small')
plt.xlabel('Eccentricity')
plt.ylabel('$\Delta$ q{}'.format(labelq[q]))
plt.xlim(0,1)
#plt.ylim(0, 1e11)
plt.sca(ax[1])
plt.xlabel('$\sigma$ L{}'.format(labelq[q]) + ' (kpc km s$^{-1}$)')
plt.ylabel('$\Delta$ q{}'.format(labelq[q]))
plt.sca(ax[2])
plt.xlabel('|L{}| / |L|'.format(labelq[q]))
plt.ylabel('$\Delta$ q{}'.format(labelq[q]))
plt.tight_layout()
plt.savefig('../plots/delta_q{}.pdf'.format(q))
###
# multiple streams
###
def pairs_pdf(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True, summary=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad} #, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
pid_comp = pid[nstart[component]:nend[component]]
plabels, units = get_parlabel(pid_comp)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
done = get_done()
N = len(done)
pp = PdfPages('../plots/corner_pairs_{:s}{:1d}_a{:1d}_{:s}_{:s}_{:d}.pdf'.format(errmode, Ndim, align, vlabel, component, summary))
fig = None
ax = None
for i in range(N):
di = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, done[i], align, vlabel))
cxi_i = di['cxi']
for j in range(i+1,N):
dj = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, done[j], align, vlabel))
cxi_j = dj['cxi']
cxi = cxi_i + cxi_j
cx = stable_inverse(cxi)
cx_i = stable_inverse(cxi_i)
cx_j = stable_inverse(cxi_j)
# select component of the parameter space
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
cq_i = cx_i[nstart[component]:nend[component], nstart[component]:nend[component]]
cq_j = cx_j[nstart[component]:nend[component], nstart[component]:nend[component]]
Nvar = np.shape(cq)[0]
print(done[i], done[j])
print(np.sqrt(np.diag(cq)))
print(np.sqrt(np.diag(cq_i)))
print(np.sqrt(np.diag(cq_j)))
if summary==False:
fig = None
ax = None
# plot ellipses
fig, ax = corner_ellipses(cq, fig=fig, ax=ax)
fig, ax = corner_ellipses(cq_i, alpha=0.5, fig=fig, ax=ax)
fig, ax = corner_ellipses(cq_j, alpha=0.5, fig=fig, ax=ax)
# labels
plt.title('{} & {}'.format(done[i], done[j]))
for k in range(Nvar-1):
plt.sca(ax[-1][k])
plt.xlabel(params[k])
plt.sca(ax[k][0])
plt.ylabel(params[k+1])
pp.savefig(fig)
else:
fig, ax = corner_ellipses(cq, fig=fig, ax=ax, alpha=0.5)
if summary:
# labels
for k in range(Nvar-1):
plt.sca(ax[-1][k])
plt.xlabel(params[k])
plt.sca(ax[k][0])
plt.ylabel(params[k+1])
pp.savefig(fig)
pp.close()
def multi_pdf(Nmulti=3, Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True):
"""Create a pdf with each page containing a corner plot with constraints on a given component of the model from multiple streams"""
pid, dp_fid, vlabel = get_varied_pars(vary)
Ntot = len(pid)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad} #, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
pid_comp = pid[nstart[component]:nend[component]]
plabels, units = get_parlabel(pid_comp)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
Nvar = len(pid_comp)
pparams0 = pparams_fid
pparams_comp = [pparams0[x] for x in pid_comp]
pparams_arr = np.array([x.value for x in pparams_comp])
pp = PdfPages('../plots/corner_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}.pdf'.format(Nmulti, errmode, Ndim, align, vlabel, component))
fig = None
ax = None
done = get_done()
N = len(done)
if Nmulti>N:
Nmulti = N
t = np.arange(N, dtype=np.int64).tolist()
all_comb = list(itertools.combinations(t, Nmulti))
comb = sorted(list(set(all_comb)))
Ncomb = len(comb)
comb_all = np.ones((Ncomb, N)) * np.nan
cx_all = np.empty((Ncomb, Nvar, Nvar))
p_all = np.empty((Ncomb, Nvar))
prel_all = np.empty((Ncomb, Nvar))
for i in range(Ncomb):
print(i, [done[i_] for i_ in comb[i]])
cxi = np.zeros((Ntot, Ntot))
fig = None
ax = None
for j in range(Nmulti):
ind = comb[i][j]
#print('{} '.format(done[ind]), end='')
dj = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, done[ind], align, vlabel))
cxi_ = dj['dxi']
cxi = cxi + cxi_
# select component of the parameter space
cx_ = stable_inverse(cxi_)
cq_ = cx_[nstart[component]:nend[component], nstart[component]:nend[component]]
if Ncomb==1:
np.save('../data/crb/cx_multi1_{:s}{:1d}_{:s}_a{:1d}_{:s}_{:s}'.format(errmode, Ndim, done[ind], align, vlabel, component), cq_)
print(np.sqrt(np.diag(cq_)))
fig, ax = corner_ellipses(cq_, alpha=0.5, fig=fig, ax=ax)
cx = stable_inverse(cxi + dj['pxi'])
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
print(np.sqrt(np.diag(cq)))
#label = '.'.join([done[comb[i][i_]] for i_ in range(Nmulti)])
#np.save('../data/crb/cx_multi{:d}_{:s}{:1d}_{:s}_a{:1d}_{:s}_{:s}'.format(Nmulti, errmode, Ndim, label, align, vlabel, component), cq)
cx_all[i] = cq
p_all[i] = np.sqrt(np.diag(cq))
prel_all[i] = p_all[i]/pparams_arr
comb_all[i][:Nmulti] = np.array(comb[i])
fig, ax = corner_ellipses(cq, fig=fig, ax=ax)
# labels
title = ' + '.join([done[comb[i][i_]] for i_ in range(Nmulti)])
plt.suptitle(title)
for k in range(Nvar-1):
plt.sca(ax[-1][k])
plt.xlabel(params[k])
plt.sca(ax[k][0])
plt.ylabel(params[k+1])
plt.tight_layout(rect=(0,0,1,0.95))
pp.savefig(fig)
np.savez('../data/crb/cx_collate_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}'.format(Nmulti, errmode, Ndim, align, vlabel, component), comb=comb_all, cx=cx_all, p=p_all, p_rel=prel_all)
pp.close()
def collate(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True, Nmax=None):
""""""
done = get_done()
N = len(done)
if Nmax is None:
Nmax = N
t = np.arange(N, dtype=np.int64).tolist()
pid, dp_fid, vlabel = get_varied_pars(vary)
Ntot = len(pid)
pparams0 = pparams_fid
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
Np = len(pid_comp)
pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))
pparams_comp = [pparams0[x] for x in pid_comp]
pparams_arr = np.array([x.value for x in pparams_comp])
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad} #, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
pid_comp = pid[nstart[component]:nend[component]]
plabels, units = get_parlabel(pid_comp)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
Nvar = len(pid_comp)
for i in range(1, Nmax+1):
Nmulti = i
all_comb = list(itertools.combinations(t, Nmulti))
comb = sorted(list(set(all_comb)))
Ncomb = len(comb)
comb_all = np.ones((Ncomb, N)) * np.nan
cx_all = np.empty((Ncomb, Nvar, Nvar))
p_all = np.empty((Ncomb, Nvar))
prel_all = np.empty((Ncomb, Nvar))
for j in range(Ncomb):
label = '.'.join([done[comb[j][i_]] for i_ in range(Nmulti)])
cx = np.load('../data/crb/cx_multi{:d}_{:s}{:1d}_{:s}_a{:1d}_{:s}_{:s}.npy'.format(Nmulti, errmode, Ndim, label, align, vlabel, component))
cx_all[j] = cx
p_all[j] = np.sqrt(np.diag(cx))
prel_all[j] = p_all[j]/pparams_arr
comb_all[j][:Nmulti] = np.array(comb[j])
np.savez('../data/crb/cx_collate_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}'.format(Nmulti, errmode, Ndim, align, vlabel, component), comb=comb_all, cx=cx_all, p=p_all, p_rel=prel_all)
def nstream_improvement(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True, relative=False):
"""Show how much parameters improve by including additional streams"""
pid, dp_fid, vlabel = get_varied_pars(vary)
done = get_done()
N = len(done)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad} #, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
pid_comp = pid[nstart[component]:nend[component]]
plabels, units = get_parlabel(pid_comp)
if relative:
punits = [' (%)' for x in units]
else:
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
Nvar = len(pid_comp)
pparams0 = pparams_fid
pparams_comp = [pparams0[x] for x in pid_comp]
pparams_arr = np.array([x.value for x in pparams_comp])
median = np.empty((Nvar, N))
x = np.arange(N) + 1
da = 3
ncol = 2
nrow = np.int64(Nvar/ncol)
w = 4 * da
h = nrow * da
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(w,h), sharex='col')
for i in range(N):
Nmulti = i+1
t = np.arange(N, dtype=np.int64).tolist()
all_comb = list(itertools.combinations(t, Nmulti))
comb = sorted(list(set(all_comb)))
Ncomb = len(comb)
coll = np.load('../data/crb/cx_collate_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(Nmulti, errmode, Ndim, align, vlabel, component))
comb_all = coll['comb']
cq_all = coll['cx']
p_all = coll['p']
if relative:
p_all = p_all * 100 / pparams_arr
median = np.median(p_all, axis=0)
Ncomb = np.shape(comb_all)[0]
nst = np.ones(Ncomb) * Nmulti
for k in range(Nvar):
plt.sca(ax[k%ncol][np.int64(k/ncol)])
if (i==0) & (k==0):
plt.plot(nst, p_all[:,k], 'o', color='0.8', ms=10, label='Single combination of N streams')
plt.plot(Nmulti, median[k], 'wo', mec='k', mew=2, ms=10, label='Median over different\ncombinations of N streams')
else:
plt.plot(nst, p_all[:,k], 'o', color='0.8', ms=10)
plt.plot(Nmulti, median[k], 'wo', mec='k', mew=2, ms=10)
if Nmulti<=3:
if Nmulti==1:
Nmin = 3
else:
Nmin = 1
ids_min = p_all[:,k].argsort()[:Nmin]
for j_ in range(Nmin):
best_names = [done[np.int64(i_)] for i_ in comb[ids_min[j_]][:Nmulti]]
print(k, j_, best_names)
label = ', '.join(best_names)
plt.text(Nmulti, p_all[ids_min[j_],k], '{}'.format(label), fontsize='xx-small')
#print(ids_min)
#idmin = np.argmin(p_all[:,k])
#print(k, [done[np.int64(i_)] for i_ in comb[idmin][:Nmulti]])
for k in range(Nvar):
plt.sca(ax[k%ncol][np.int64(k/ncol)])
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
if relative:
plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
plt.ylabel(params[k])
if k==0:
plt.legend(frameon=False, fontsize='small', loc=1)
if k%ncol==nrow-1:
plt.xlabel('Number of streams in a combination')
plt.tight_layout()
plt.savefig('../plots/nstream_improvement_{:s}{:1d}_a{:1d}_{:s}_{:s}_{:1d}.pdf'.format(errmode, Ndim, align, vlabel, component, relative))
def corner_ellipses(cx, dax=2, color='k', alpha=1, lw=2, fig=None, ax=None, autoscale=True, correlate=False):
"""Corner plot with ellipses given by an input matrix"""
# assert square matrix
Nvar = np.shape(cx)[0]
if correlate:
Npair = np.int64(Nvar*(Nvar - 1)/2)
pcc = np.empty((3,Npair))
k = 0
if (fig is None) or (ax is None):
plt.close()
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
if correlate:
pcc[0,k] = i
pcc[1,k] = j
pcc[2,k] = cx[i][j]/np.sqrt(cx[i][i]*cx[j][j])
k += 1
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=color, alpha=alpha, lw=lw)
plt.gca().add_patch(e)
if autoscale:
plt.gca().autoscale_view()
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.tight_layout()
if correlate:
return(fig, ax, pcc)
else:
return (fig, ax)
###
# compare observing modes
###
def comp_errmodes_old(n, errmodes=['binospec', 'fiducial', 'hectochelle'], Ndim=4, vary=['progenitor', 'bary', 'halo'], plot='halo', align=True, fast=False, scale=False):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
if align:
alabel = '_align'
else:
alabel = ''
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
params = params[i0:i1]
if scale:
dp_unit = unity_scale(dp)
#print(dp_unit)
dp_unit = dp_unit[i0:i1]
pid = pid[i0:i1]
#print(params, dp_unit, Nvar, len(pid), len(dp_unit))
#label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
label = errmodes
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for l, errmode in enumerate(errmodes):
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cx = cx[i0:i1,i0:i1]
#print(np.sqrt(np.diag(cx)))
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
plt.gca().add_patch(e)
if l==1:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('../plots/crb_triangle_alldim{:s}_comparison_{:d}_{:s}_{:s}.pdf'.format(alabel, n, vlabel, plot))
def comp_obsmodes(vary=['progenitor', 'bary', 'halo'], align=True, component='halo'):
"""Compare CRBs from different observing modes"""
pid, dp_fid, vlabel = get_varied_pars(vary)
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
Nvar = len(pid_comp)
plabels, units = get_parlabel(pid_comp)
punits = [' (%)' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
plainlabels = ['V_h', 'R_h', 'q_x', 'q_z']
names = get_done()
errmodes = ['fiducial', 'fiducial', 'fiducial', 'desi', 'gaia']
Ndims = [ 3, 4, 6, 4, 6]
Nmode = len(errmodes)
# fiducial
errmode = 'fiducial'
Ndim = 6
coll_fiducial = np.load('../data/crb/cx_collate_multi1_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(errmode, Ndim, align, vlabel, component))
#errmodes = ['fiducial', 'gaia', 'desi']
#Ndims = [6,6,4]
labels = {'desi': 'DESI-like', 'gaia': 'Gaia-like', 'fiducial': 'Fiducial'}
cfrac = {'desi': 0.8, 'gaia': 0.6, 'fiducial': 0.2}
cmap = {'fiducial': mpl.cm.bone, 'desi': mpl.cm.pink, 'gaia': mpl.cm.pink}
frac = [0.8, 0.5, 0.2, 0.5, 0.2]
ls_all = ['-', '-', '-', '--', '--']
a = 0.7
da = 3
ncol = 2
nrow = np.int64(Nvar/ncol)
w = 4 * da
h = nrow * da * 1.3
plt.close()
fig, ax = plt.subplots(nrow+2, ncol, figsize=(w, h), sharex=True, gridspec_kw = {'height_ratios':[3, 1.2, 3, 1.2]})
for i in range(Nmode):
errmode = errmodes[i]
Ndim = Ndims[i]
coll = np.load('../data/crb/cx_collate_multi1_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(errmode, Ndim, align, vlabel, component))
lw = np.sqrt(Ndims[i]) * 2
ls = ls_all[i]
#color = mpl.cm.bone(cfrac[errmodes[i]])
color = cmap[errmode](frac[i])
for j in range(Nvar):
#plt.sca(ax[j])
plt.sca(ax[j%ncol*2][np.int64(j/ncol)])
if labels[errmode]=='Fiducial':
label = '{} {}D'.format(labels[errmode], Ndims[i])
else:
label = '{} ({}D)'.format(labels[errmode], Ndims[i])
plt.plot(coll['p_rel'][:,j]*100, '-', ls=ls, alpha=a, lw=lw, color=color, label=label)
plt.sca(ax[j%ncol*2+1][np.int64(j/ncol)])
plt.plot(coll['p_rel'][:,j]/coll_fiducial['p_rel'][:,j], '-', ls=ls, alpha=a, lw=lw, color=color)
#print(errmode, j, np.median(coll['p_rel'][:,j]/coll_fiducial['p_rel'][:,j]), np.std(coll['p_rel'][:,j]/coll_fiducial['p_rel'][:,j]))
for j in range(Nvar):
plt.sca(ax[j%ncol*2][np.int64(j/ncol)])
plt.ylabel(params[j])
plt.gca().set_yscale('log')
plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.sca(ax[j%ncol*2+1][np.int64(j/ncol)])
plt.ylabel('$\\frac{\Delta %s}{\Delta {%s}_{,\,Fid\,6D}}$'%(plainlabels[j], plainlabels[j]), fontsize='medium')
plt.ylim(0.5, 10)
plt.gca().set_yscale('log')
plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.sca(ax[nrow][ncol-1])
plt.legend(loc=0, fontsize='x-small', handlelength=0.8, frameon=True)
# stream names
for j in range(ncol):
plt.sca(ax[0][j])
y0, y1 = plt.gca().get_ylim()
fp = 0.8
yp = y0 + fp*(y1-y0)
for e, name in enumerate(names):
txt = plt.text(e, yp, name, ha='center', va='top', rotation=90, fontsize='x-small', color='0.2')
txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))
plt.tight_layout()
plt.savefig('../plots/obsmode_comparison.pdf')
def vel_improvement(vary=['progenitor', 'bary', 'halo'], align=True, component='halo', errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
Nvar = len(pid_comp)
plabels, units = get_parlabel(pid_comp)
punits = [' (%)' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
names = get_done()
coll = []
for Ndim in [3,4,6]:
coll += [np.load('../data/crb/cx_collate_multi1_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(errmode, Ndim, align, vlabel, component))]
rv = coll[0]['p_rel'] / coll[1]['p_rel']
pm = coll[1]['p_rel'] / coll[2]['p_rel']
N = len(names)
prog_rv = np.empty(N)
prog_pm = np.empty(N)
for i in range(N):
mock = pickle.load(open('../data/mock_{}.params'.format(names[i]), 'rb'))
pms = np.array([x.value for x in mock['v0'][1:]])
prog_rv[i] = np.abs(mock['v0'][0].value)
#prog_pm[i] = np.linalg.norm(pms)
prog_pm[i] = max(np.abs(pms))
da = 2
plt.close()
fig, ax = plt.subplots(Nvar, 3, figsize=(da*3, da*Nvar), sharex='col')
for j in range(Nvar):
plt.sca(ax[j][0])
plt.plot(prog_rv, rv[:,j], 'ko')
plt.sca(ax[j][1])
plt.plot(prog_rv/prog_pm, pm[:,j], 'ko')
plt.sca(ax[j][2])
plt.plot(prog_pm, pm[:,j], 'ko')
plt.tight_layout()
###
# Referee's report
###
def mass_age(name='atlas', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
"""Create a streakline model of a stream
baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
# vary progenitor parameters
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for i in range(3):
mock['x0'][i] += pparams0[26+i]
mock['v0'][i] += pparams0[29+i]
# vary potential parameters
potential = 'octu'
pparams = pparams0[:26]
#print(pparams[0])
pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
pparams[2] = (10**pparams0[2].value)*pparams0[2].unit
#pparams[0] = pparams0[0]*1e15
#pparams[2] = pparams0[2]*1e15
#print(pparams[0])
# adjust circular velocity in this halo
vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
ylabel = ['Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas yr$^{-1}$)', '$\mu_\delta$ (mas yr$^{-1}$)']
plt.close()
fig, ax = plt.subplots(2, 5, figsize=(20,7), sharex='col', sharey='col', squeeze=False)
for e, f in enumerate(np.arange(0.8,1.21,0.1)[::-1]):
# create a model stream with these parameters
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': f*mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.generate()
stream.observe(**params['observe'])
for i in range(5):
plt.sca(ax[0][i])
plt.gca().invert_xaxis()
#plt.xlabel('R.A. (deg)')
plt.ylabel(ylabel[i])
plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=mpl.cm.viridis(e/5), mec='none', ms=4, label='{:.2g}$\\times$10$^3$ M$_\odot$'.format(f*mock['mi'].to(u.Msun).value*1e-3))
if (i==0) & (e==4):
plt.legend(frameon=True, handlelength=0.5, fontsize='small', markerscale=1.5)
if i==2:
plt.title('Age = {:.2g}'.format(mock['age'].to(u.Gyr)), fontsize='medium')
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': f*mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.generate()
stream.observe(**params['observe'])
for i in range(5):
plt.sca(ax[1][i])
plt.gca().invert_xaxis()
plt.xlabel('R.A. (deg)')
plt.ylabel(ylabel[i])
plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=mpl.cm.viridis(e/5), mec='none', ms=4, label='{:.2g}'.format(f*mock['age'].to(u.Gyr)))
if (i==0) & (e==4):
plt.legend(frameon=True, handlelength=0.5, fontsize='small', markerscale=1.5)
if i==2:
plt.title('Initial mass = {:.2g}$\\times$10$^3$ M$_\odot$'.format(mock['mi'].to(u.Msun).value*1e-3), fontsize='medium')
plt.tight_layout(w_pad=0)
plt.savefig('../paper/age_mass_{}.png'.format(name))
# progenitor's orbit
def prog_orbit(n):
""""""
orbit = stream_orbit(n)
R = np.linalg.norm(orbit['x'][:2,:].to(u.kpc), axis=0)[::-1]
x = orbit['x'][0].to(u.kpc)[::-1]
y = orbit['x'][1].to(u.kpc)[::-1]
z = orbit['x'][2].to(u.kpc)[::-1]
c = np.arange(np.size(z))[::-1]
plt.close()
fig, ax = plt.subplots(1,3,figsize=(15,5))
plt.sca(ax[0])
plt.scatter(x, y, c=c, cmap=mpl.cm.gray)
plt.xlabel('X (kpc)')
plt.ylabel('Y (kpc)')
plt.sca(ax[1])
plt.scatter(x, z, c=c, cmap=mpl.cm.gray)
plt.xlabel('X (kpc)')
plt.ylabel('Z (kpc)')
plt.sca(ax[2])
plt.scatter(y, z, c=c, cmap=mpl.cm.gray)
plt.xlabel('Y (kpc)')
plt.ylabel('Z (kpc)')
plt.tight_layout()
plt.savefig('../plots/orbit_cartesian_{}.png'.format(n))
#plt.scatter(R[::-1], z[::-1], c=c[::-1], cmap=mpl.cm.gray)
#plt.plot(Rp, zp, 'ko', ms=10)
#plt.xlim(0,40)
#plt.ylim(-20,20)
def prog_orbit3d(name, symmetry=False):
""""""
orbit = stream_orbit(name)
R = np.linalg.norm(orbit['x'][:2,:].to(u.kpc), axis=0)[::-1]
x = orbit['x'][0].to(u.kpc)[::-1].value
y = orbit['x'][1].to(u.kpc)[::-1].value
z = orbit['x'][2].to(u.kpc)[::-1].value
c = np.arange(np.size(z))[::-1]
plt.close()
fig = plt.figure(figsize=(9,9))
ax = fig.add_subplot(1,1,1, projection='3d')
if symmetry:
azimuth = {-1: 119, -2: -39, -3: -5, -4: -11}
elevation = {-1: 49, -2: -117, -3: 49, -4: 60}
ax.view_init(azim=azimuth[n], elev=elevation[n])
else:
ax.view_init(azim=-10, elev=30)
ax.set_frame_on(False)
ax.scatter(x, y, z, 'o', depthshade=False, c=c, cmap=mpl.cm.YlOrBr_r)
ax.set_xlabel('X (kpc)')
ax.set_ylabel('Y (kpc)')
ax.set_zlabel('Z (kpc)')
plt.title('{}'.format(name))
plt.tight_layout()
plt.savefig('../plots/orbit_3d_{}_{:d}.png'.format(name, symmetry))
def stream_orbit(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), diagnostic=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
"""Create a streakline model of a stream
baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
# vary progenitor parameters
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
#for i in range(3):
#mock['x0'][i] += pparams0[19+i]
#mock['v0'][i] += pparams0[22+i]
# vary potential parameters
potential = 'quad'
pparams = pparams0[:19]
pparams[0] = pparams0[0]*1e10
pparams[2] = pparams0[2]*1e10
# adjust circular velocity in this halo
vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
# create a model stream with these parameters
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.prog_orbit()
if diagnostic:
r = np.linalg.norm(stream.orbit['x'].to(u.kpc), axis=0)
rmin = np.min(r)
rmax = np.max(r)
e = (rmax - rmin)/(rmax + rmin)
print(rmin, rmax, e)
return stream.orbit
def check_rcur():
""""""
done = get_done()[::-1]
N = len(done)
t = Table.read('../data/crb/ar_orbital_summary.fits')
for i, name in enumerate(done):
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
c = coord.ICRS(ra=mock['x0'][0], dec=mock['x0'][1], distance=mock['x0'][2])
gal = c.transform_to(coord.Galactocentric)
rcur = np.sqrt(gal.x**2 + gal.y**2 + gal.z**2).to(u.kpc)
print(done[i], rcur, np.array(t[t['name']==name]['rcur']))
# summary of parameter constraints
def relative_crb(vary=['progenitor', 'bary', 'halo'], component='all', Ndim=6, align=True, fast=False, scale=False):
"""Plot crb_param/param for 3 streams"""
    pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': len(pid), 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
plabels, units = get_parlabel(pid)
#params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
params = [x for x in plabels]
params = params[nstart[component]:nend[component]]
Nvar = len(params)
xpos = np.arange(Nvar)
params_fid = np.array([pparams_fid[x].value for x in pid[nstart[component]:nend[component]]])
plt.close()
plt.figure(figsize=(10,6))
for n in [-1,-2,-3]:
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cq /= scale_mat
crb = np.sqrt(np.diag(cq))
crb_rel = crb / params_fid
print(fancy_name(n))
#print(crb)
print(crb_rel)
plt.plot(xpos, crb_rel, 'o', label='{}'.format(fancy_name(n)))
plt.legend(fontsize='small')
plt.ylabel('Relative CRB')
plt.xticks(xpos, params, rotation='horizontal', fontsize='medium')
plt.xlabel('Parameter')
plt.ylim(0, 0.2)
#plt.gca().set_yscale('log')
plt.tight_layout()
plt.savefig('../plots/relative_crb_{:s}_{:s}_{:d}.png'.format(vlabel, component, Ndim))
def relative_crb_sky(vary=['progenitor', 'bary', 'halo'], component='all', Ndim=6, align=True, fast=False, scale=False):
""""""
    pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': len(pid), 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
plabels, units = get_parlabel(pid)
#params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
params = [x for x in plabels]
params = params[nstart[component]:nend[component]]
Nvar = len(params)
xpos = np.arange(Nvar)
params_fid = np.array([pparams_fid[x].value for x in pid[nstart[component]:nend[component]]])
dd = 5
plt.close()
fig, ax = plt.subplots(Nvar, 2, figsize=(dd, 0.5*dd*Nvar), sharex='col', sharey='col', gridspec_kw = {'width_ratios':[6, 1]})
for n in [-1,-2,-3]:
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cq /= scale_mat
crb = np.sqrt(np.diag(cq))
crb_rel = crb / params_fid
#print(fancy_name(n))
##print(crb)
#print(crb_rel)
stream = stream_model(n)
for i in range(Nvar):
vmin, vmax = -2, 2
cind = (np.log10(crb_rel[i]) - vmin)/(vmax - vmin)
color = mpl.cm.magma_r(cind)
            plt.sca(ax[i][0])
plt.plot(stream.obs[0], stream.obs[1], 'o', color=color)
for i in range(Nvar):
        plt.sca(ax[i][0])
        plt.gca().set_facecolor(mpl.cm.magma(0))
plt.gca().invert_xaxis()
plt.title(params[i], fontsize='medium')
plt.ylabel('Dec (deg)')
if i==Nvar-1:
plt.xlabel('R.A. (deg)')
#plt.legend(fontsize='small')
#plt.ylabel('Relative CRB')
#plt.xticks(xpos, params, rotation='horizontal', fontsize='medium')
#plt.xlabel('Parameter')
#plt.gca().set_yscale('log')
plt.tight_layout()
plt.savefig('../plots/relative_crb_sky_{:s}_{:s}_{:d}.png'.format(vlabel, component, Ndim))
# toy problem: kepler + dipole
import sklearn.datasets
def create_fmi(n, Ndim=4, niter=20, alabel='_align', vlabel='point_dipole', Nobsdim=6):
""""""
state = n
invertible = False
cnt = 0
for cnt in range(niter):
cxi = sklearn.datasets.make_spd_matrix(Ndim, random_state=state)
cx = stable_inverse(cxi)
invertible = np.allclose(np.matmul(cxi, cx), np.eye(Ndim))
if invertible:
break
else:
            state = np.random.randint(2**31 - 1)
np.save('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}'.format(alabel, n, vlabel, Nobsdim), cxi)
cx[0,1:] = 0
cx[1:,0] = 0
cxi = stable_inverse(cx)
np.save('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}'.format(alabel, n+1, vlabel, Nobsdim), cxi)
def basic_fmi(n=0, alabel='_align', vlabel='point_dipole', Nobsdim=6):
""""""
Ndim = 4
cxi = np.diag([1.5, 3, 1, 1])
np.save('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}'.format(alabel, n, vlabel, Nobsdim), cxi)
def crb_toy(n, alabel='_align', Nobsdim=6, vlabel='point_dipole'):
""""""
def talk_crb_triangle(n=-1, vary=['progenitor', 'bary', 'halo'], plot='all', reveal=0, fast=False, scale=False):
"""Produce a triangle plot of 2D Cramer-Rao bounds for all model parameters using a given stream"""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
alabel='_align'
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
    params = params[i0:i1]
    if scale:
        dp_unit = unity_scale(dp)
        dp_unit = dp_unit[i0:i1]
#label = ['GD-1', 'Pal 5']
label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
#name = columns[int(np.abs(n)-1)]
#labels = ['RA, Dec, d', 'RA, Dec, d,\n$V_r$', 'RA, Dec, d,\n$V_r$, $\mu_\\alpha$, $\mu_\\delta$']
#params0 = ['$V_h$ (km/s)', '$R_h$ (kpc)', '$q_1$', '$q_z$', '$M_{LMC}$', '$X_p$', '$Y_p$', '$Z_p$', '$V_{xp}$', '$V_{yp}$', '$V_{zp}$']
#params = ['$\Delta$ '+x for x in params0]
ylim = [150, 20, 0.5, 0.5, 5e11]
ylim = [20, 10, 0.1, 0.1]
plt.close()
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(8,8), sharex='col', sharey='row')
# plot 2d bounds in a triangle fashion
Ndim = 3
#labels = columns
streams = np.array([-1,-2,-3,-4])
slist = streams[:reveal+1]
#for l, n in enumerate(slist):
for l, Ndim in enumerate([3, 4, 6]):
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cx = cx[i0:i1,i0:i1]
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arccos(v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.PuBu((l+3)/6), lw=3, label=label[l])
plt.gca().add_patch(e)
if l==1:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
#plt.title('Marginalized ')
#plt.tight_layout()
plt.tight_layout(h_pad=0.0, w_pad=0.0)
plt.savefig('../plots/talk2/triangle_{}.png'.format(n))
#plt.savefig('../plots/talk2/triangle_{}.png'.format(reveal))
def talk_stream_comp(n=-1, vary=['progenitor', 'bary', 'halo'], plot='all', reveal=0, fast=False, scale=False):
"""Produce a triangle plot of 2D Cramer-Rao bounds for all model parameters using a given stream"""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
alabel='_align'
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
    params = params[i0:i1]
    if scale:
        dp_unit = unity_scale(dp)
        dp_unit = dp_unit[i0:i1]
label = ['GD-1', 'Pal 5', 'Triangulum']
#label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
#name = columns[int(np.abs(n)-1)]
#labels = ['RA, Dec, d', 'RA, Dec, d,\n$V_r$', 'RA, Dec, d,\n$V_r$, $\mu_\\alpha$, $\mu_\\delta$']
#params0 = ['$V_h$ (km/s)', '$R_h$ (kpc)', '$q_1$', '$q_z$', '$M_{LMC}$', '$X_p$', '$Y_p$', '$Z_p$', '$V_{xp}$', '$V_{yp}$', '$V_{zp}$']
#params = ['$\Delta$ '+x for x in params0]
ylim = [150, 20, 0.5, 0.5, 5e11]
ylim = [20, 10, 0.1, 0.1]
plt.close()
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(8,8), sharex='col', sharey='row')
# plot 2d bounds in a triangle fashion
Ndim = 3
#labels = columns
streams = np.array([-1,-2,-3,-4])
slist = streams[:reveal+1]
for l, n in enumerate(slist):
#for l, Ndim in enumerate([3, 4, 6]):
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cx = cx[i0:i1,i0:i1]
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.YlOrBr((l+3)/6), lw=3, label=label[l])
plt.gca().add_patch(e)
if l==0:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
#plt.title('Marginalized ')
#plt.tight_layout()
plt.tight_layout(h_pad=0.0, w_pad=0.0)
plt.savefig('../plots/talk2/comparison_{}.png'.format(reveal))
def test_ellipse():
""""""
th = np.radians(60)
v = np.array([[np.cos(th),np.sin(th)], [-np.sin(th),np.cos(th)]])
w = np.array([2,1])
plt.close()
plt.figure()
theta = np.degrees(np.arctan2(v[0][1], v[0][0]))
print(theta, np.degrees(th))
e = mpl.patches.Ellipse((0,0), width=w[0]*2, height=w[1]*2, angle=theta, fc='none', ec='k', lw=2)
plt.gca().add_artist(e)
plt.xlim(-5,5)
plt.ylim(-5,5)
def test_ellipse2():
""""""
v1 = np.array([1.5, -0.05])
v2 = np.array([0.01, 0.3])
c = np.outer(v1, v1) + np.outer(v2, v2)
w, v = np.linalg.eig(c)
print(w)
print(v)
plt.close()
plt.figure()
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
print(width/height)
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec='k', lw=2)
plt.gca().add_artist(e)
plt.xlim(-5,5)
plt.ylim(-5,5)
plt.savefig('../plots/test_ellipse.png')
def test_ellipse3():
""""""
v1 = np.array([-28., -8.])
v2 = np.array([6., -21.])
c = np.outer(v1, v1) + np.outer(v2, v2)
    w, v = np.linalg.eig(c)
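    # Hypothetical continuation (the source file is truncated here), mirroring test_ellipse2:
    print(w)
    print(v)
    plt.close()
    plt.figure()
    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
    width = np.sqrt(w[0])*2
    height = np.sqrt(w[1])*2
    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec='k', lw=2)
    plt.gca().add_artist(e)
    plt.xlim(-40,40)
    plt.ylim(-40,40)
    plt.savefig('../plots/test_ellipse3.png')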
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""
import os
import pickle
import abc
import numpy as np
from torch.utils.data import Dataset
class DatasetBase(abc.ABC, Dataset):
def __init__(self):
self._camera = None
self._default_preprocessor = lambda x: x
self.preprocessor = self._default_preprocessor
def filter_camera(self, camera):
assert camera in self.cameras
self._camera = camera
return self
def __enter__(self):
return self
def __exit__(self, *exc):
self._camera = None
@abc.abstractmethod
def __len__(self):
pass
def __getitem__(self, idx):
sample = {'image': self.load_image(idx),
'depth': self.load_depth(idx),
'label': self.load_label(idx)}
if self.split != 'train':
# needed to compute mIoU on original image size
sample['label_orig'] = sample['label'].copy()
if self.with_input_orig:
sample['image_orig'] = sample['image'].copy()
sample['depth_orig'] = sample['depth'].copy().astype('float32')
sample = self.preprocessor(sample)
return sample
@property
@abc.abstractmethod
def cameras(self):
pass
@property
@abc.abstractmethod
def class_names(self):
pass
@property
@abc.abstractmethod
def class_names_without_void(self):
pass
@property
@abc.abstractmethod
def class_colors(self):
pass
@property
@abc.abstractmethod
def class_colors_without_void(self):
pass
@property
@abc.abstractmethod
def n_classes(self):
pass
@property
@abc.abstractmethod
def n_classes_without_void(self):
pass
@property
@abc.abstractmethod
def split(self):
pass
@property
@abc.abstractmethod
def depth_mode(self):
pass
@property
@abc.abstractmethod
def depth_mean(self):
pass
@property
@abc.abstractmethod
def depth_std(self):
pass
@property
@abc.abstractmethod
def source_path(self):
pass
@property
@abc.abstractmethod
def with_input_orig(self):
pass
@property
def camera(self):
return self._camera
@abc.abstractmethod
def load_image(self, idx):
pass
@abc.abstractmethod
def load_depth(self, idx):
pass
@abc.abstractmethod
def load_label(self, idx):
pass
def color_label(self, label, with_void=True):
if with_void:
colors = self.class_colors
else:
colors = self.class_colors_without_void
cmap = np.asarray(colors, dtype='uint8')
return cmap[label]
@staticmethod
def static_color_label(label, colors):
cmap = np.asarray(colors, dtype='uint8')
return cmap[label]
def compute_class_weights(self, weight_mode='median_frequency', c=1.02):
assert weight_mode in ['median_frequency', 'logarithmic', 'linear']
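        # Weighting schemes (all computed over the non-void classes):
        #   linear:            w_c = n_pixels_c
        #   median_frequency:  w_c = median(freq) / freq_c, with
        #                      freq_c = n_pixels_c / n_pixels_of_images_containing_c
        #   logarithmic:       w_c = 1 / log(c + p_c), with p_c = n_pixels_c / sum_k n_pixels_k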
# build filename
class_weighting_filepath = os.path.join(
self.source_path, f'weighting_{weight_mode}_'
f'1+{self.n_classes_without_void}')
if weight_mode == 'logarithmic':
class_weighting_filepath += f'_c={c}'
class_weighting_filepath += f'_{self.split}.pickle'
if os.path.exists(class_weighting_filepath):
class_weighting = pickle.load(open(class_weighting_filepath, 'rb'))
print(f'Using {class_weighting_filepath} as class weighting')
return class_weighting
print('Compute class weights')
n_pixels_per_class = np.zeros(self.n_classes)
n_image_pixels_with_class = np.zeros(self.n_classes)
for i in range(len(self)):
label = self.load_label(i)
h, w = label.shape
current_dist = np.bincount(label.flatten(),
minlength=self.n_classes)
n_pixels_per_class += current_dist
# For median frequency we need the pixel sum of the images where
# the specific class is present. (It only matters if the class is
# present in the image and not how many pixels it occupies.)
class_in_image = current_dist > 0
n_image_pixels_with_class += class_in_image * h * w
print(f'\r{i+1}/{len(self)}', end='')
print()
# remove void
n_pixels_per_class = n_pixels_per_class[1:]
n_image_pixels_with_class = n_image_pixels_with_class[1:]
if weight_mode == 'linear':
class_weighting = n_pixels_per_class
elif weight_mode == 'median_frequency':
frequency = n_pixels_per_class / n_image_pixels_with_class
class_weighting = np.median(frequency) / frequency
elif weight_mode == 'logarithmic':
probabilities = n_pixels_per_class / np.sum(n_pixels_per_class)
class_weighting = 1 / np.log(c + probabilities)
if np.isnan(np.sum(class_weighting)):
print(f"n_pixels_per_class: {n_pixels_per_class}")
print(f"n_image_pixels_with_class: {n_image_pixels_with_class}")
print(f"class_weighting: {class_weighting}")
raise ValueError('class weighting contains NaNs')
with open(class_weighting_filepath, 'wb') as f:
pickle.dump(class_weighting, f)
print(f'Saved class weights under {class_weighting_filepath}.')
return class_weighting
def compute_depth_mean_std(self, force_recompute=False):
# ensure that mean and std are computed on train set only
assert self.split == 'train'
# build filename
depth_stats_filepath = os.path.join(
self.source_path, f'depth_{self.depth_mode}_mean_std.pickle')
if not force_recompute and os.path.exists(depth_stats_filepath):
depth_stats = pickle.load(open(depth_stats_filepath, 'rb'))
print(f'Loaded depth mean and std from {depth_stats_filepath}')
print(depth_stats)
return depth_stats
print('Compute mean and std for depth images.')
pixel_sum = np.float64(0)
        pixel_nr = np.uint64(0)
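        # Hypothetical continuation (the source file is truncated here): accumulate the
        # depth statistics over the train split and cache them, analogous to
        # compute_class_weights() above. The single-pass variance formula is an assumption.
        pixel_sum_squared = np.float64(0)
        for i in range(len(self)):
            depth = self.load_depth(i)
            if self.depth_mode == 'raw':
                depth_valid = depth[depth > 0]
            else:
                depth_valid = depth.flatten()
            pixel_sum += np.sum(depth_valid)
            pixel_sum_squared += np.sum(np.square(depth_valid))
            pixel_nr += np.uint64(len(depth_valid))
            print(f'\r{i+1}/{len(self)}', end='')
        print()
        mean = pixel_sum / pixel_nr
        std = np.sqrt(pixel_sum_squared / pixel_nr - mean**2)
        depth_stats = {'mean': mean, 'std': std}
        print(depth_stats)
        with open(depth_stats_filepath, 'wb') as f:
            pickle.dump(depth_stats, f)
        return depth_stats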
"""Used for simmyride
"""
import os
from PIL import Image
import numpy as np
import numpngw
import pdb
def separate_labels(npim, label_list, outpath):
"""
For converting the classifier results to unreal masks
"""
for i,label in enumerate(label_list):
out = np.zeros(npim.shape)
out[npim == i+1] = 65535
out = out.astype(np.uint16)
numpngw.write_png('%s.png' % os.path.join(outpath, label), out)
train_size_per_class = 400
def combine_masks(img_list, outfile):
"""
For making the training set
"""
im=Image.open(img_list[0]).convert('L')
    acc = np.zeros(np.array(im, dtype=np.float32).shape)
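    # Hypothetical continuation (the source file is truncated here): stack the
    # individual binary masks into one label image and write it to outfile.
    for i, fname in enumerate(img_list):
        mask = np.array(Image.open(fname).convert('L'), dtype=np.float32)
        acc[mask > 0] = i + 1
    numpngw.write_png(outfile, acc.astype(np.uint16))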
import os
import os.path
import sys
import numpy as np
import pytest
# Local files
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file and calls the
# functions below to ingest data. This is the only part of the script
# that should be executed when the script is imported, or else the
# Python data reader might misbehave.
# Data
np.random.seed(20190708)
_num_samples = 23
_sample_size = 7
_samples = np.random.normal(size=(_num_samples,_sample_size))
_samples = _samples.astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
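# Hypothetical illustration of how the Python data reader consumes the functions
# above (the reader-side code lives in LBANN itself, not in this file):
#     num_samples()  -> 23
#     sample_dims()  -> (7,)
#     get_sample(0)  -> numpy.float32 vector of length 7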
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
trainer = lbann.Trainer()
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Layer graph
x = lbann.Input()
obj = lbann.L2Norm2(x)
layers = list(lbann.traverse_layer_graph(x))
metric = lbann.Metric(obj, name='obj')
callbacks = []
# Compute expected value with NumPy
vals = []
for i in range(num_samples()):
x = get_sample(i)
obj = np.inner(x, x)
vals.append(obj)
val = np.mean(vals)
    tol = 8 * val * np.finfo(np.float32).eps
from __future__ import division, print_function, absolute_import
import os.path
import tempfile
import shutil
import numpy as np
import warnings
import glob
from numpy.testing import (assert_equal, dec, decorate_methods,
TestCase, run_module_suite, assert_allclose,
assert_array_equal)
from scipy import misc
try:
import PIL.Image
except ImportError:
_have_PIL = False
else:
_have_PIL = True
# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')
datapath = os.path.dirname(__file__)
class TestPILUtil(TestCase):
def test_imresize(self):
im = np.random.random((10, 20))
for T in np.sctypes['float'] + [float]:
# 1.1 rounds to below 1.1 for float16, 1.101 works
im1 = misc.imresize(im, T(1.101))
assert_equal(im1.shape, (11, 22))
def test_imresize2(self):
im = np.random.random((20, 30))
im2 = misc.imresize(im, (30, 40), interp='bicubic')
assert_equal(im2.shape, (30, 40))
def test_imresize3(self):
im = np.random.random((15, 30))
im2 = misc.imresize(im, (30, 60), interp='nearest')
assert_equal(im2.shape, (30, 60))
def test_imresize4(self):
im = np.array([[1, 2],
[3, 4]])
# Check that resizing by target size, float and int are the same
im2 = misc.imresize(im, (4, 4), mode='F') # output size
im3 = misc.imresize(im, 2., mode='F') # fraction
im4 = misc.imresize(im, 200, mode='F') # percentage
assert_equal(im2, im3)
assert_equal(im2, im4)
def test_imresize5(self):
im = np.random.random((25, 15))
im2 = misc.imresize(im, (30, 60), interp='lanczos')
assert_equal(im2.shape, (30, 60))
def test_bytescale(self):
x = np.array([0, 1, 2], np.uint8)
y = np.array([0, 1, 2])
assert_equal(misc.bytescale(x), x)
assert_equal(misc.bytescale(y), [0, 127, 255])
def test_bytescale_keywords(self):
x = np.array([40, 60, 120, 200, 300, 500])
res_lowhigh = misc.bytescale(x, low=10, high=143)
assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143])
res_cmincmax = misc.bytescale(x, cmin=60, cmax=300)
assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255])
assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4])
def test_imsave(self):
picdir = os.path.join(datapath, "data")
for png in glob.iglob(picdir + "/*.png"):
with warnings.catch_warnings(record=True): # PIL ResourceWarning
img = misc.imread(png)
tmpdir = tempfile.mkdtemp()
try:
fn1 = os.path.join(tmpdir, 'test.png')
fn2 = os.path.join(tmpdir, 'testimg')
# PIL ResourceWarning
with warnings.catch_warnings(record=True):
misc.imsave(fn1, img)
misc.imsave(fn2, img, 'PNG')
# PIL ResourceWarning
with warnings.catch_warnings(record=True):
data1 = misc.imread(fn1)
data2 = misc.imread(fn2)
assert_allclose(data1, img)
assert_allclose(data2, img)
assert_equal(data1.shape, img.shape)
assert_equal(data2.shape, img.shape)
finally:
shutil.rmtree(tmpdir)
decorate_methods(TestPILUtil, _pilskip)
def tst_fromimage(filename, irange, shape):
fp = open(filename, "rb")
img = misc.fromimage(PIL.Image.open(fp))
fp.close()
imin, imax = irange
assert_equal(img.min(), imin)
assert_equal(img.max(), imax)
    assert_equal(img.shape, shape)
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""loader"""
import random
import time
from collections import defaultdict
from multiprocessing import Process
import numpy as np
from .data_loader import DataLoader
data_column = [
'input_ids',
'position_ids',
'img_feat',
'img_pos_feat',
'audio_feat',
'audio_pos_ids',
'attention_mask',
'gather_index',
'txt_labels',
'txt_mask',
'txt_label_mask',
'img_mask_tgt',
'img_mask_tgt_mask',
'img_masks',
'mrc_label_target',
'mrfr_feat_target',
'audio_mask_tgt_mask',
'audio_masks',
'mafr_feat_target',
'itm_target',
'ma_neg_index',
'ma_neg_sample',
'mr_neg_index',
'mr_neg_sample',
'txt_gts',
'txt_masks',
'img_token_gts',
'img_token_masks',
'taskId'
]
task2id = {
'mlmThree': 0,
'mrcThree': 1,
'mrfrThree': 2,
'mafrThree': 3,
'macThree': 4,
"itmThree": 5,
'mrctThree': 6,
"tdThree": 7,
"idThree": 8,
"adThree": 9,
"ret": 10,
"ftRet": 11,
"ftCap": 12
}
task2id_open = {
'mlmThree_open': 0,
'mrcThree_open': 1,
'mrfrThree_open': 2,
'mafrThree_open': 3,
'macThree_open': 4,
"itmThree_open": 5,
'mrctThree_open': 6,
"tdThree_open": 7,
"idThree_open": 8,
"adThree_open": 9
}
class MetaLoader():
""" wraps multiple data loaders """
def __init__(self, loaders, batch_size=176, accum_steps=1, task_num=9, print_time=True):
assert isinstance(loaders, dict)
self.task_num = task_num
self.name2loader = {}
self.name2iter = {}
self.name2iter2 = {}
self.sampling_pools = []
self.loaders = loaders
for n, l in loaders.items():
if isinstance(l, tuple):
l, r = l
elif isinstance(l, DataLoader):
r = 1
else:
raise ValueError()
self.name2loader[n] = l
self.name2iter[n] = iter(l)
self.name2iter2[n] = iter(l)
self.sampling_pools.extend([n] * r)
self.task = self.sampling_pools[0]
self.task_label = [0] * self.task_num
self.step = 0
self.accum_steps = accum_steps
self.bs = batch_size
self.step_cnt = 0
self.flag = "iter1"
self.iter1_init_cnt = 0
self.iter2_init_cnt = 0
self.task_index_list = np.random.permutation(self.task_num)
random.seed(1)
self.all_ids = []
self.print_time = print_time
def init_iter(self, init_cnt):
while init_cnt < self.task_num:
local_task = self.sampling_pools[init_cnt]
iter_tmp = iter(self.name2loader[local_task])
if self.flag == 'iter1':
self.name2iter2[local_task] = iter_tmp
else:
self.name2iter[local_task] = iter_tmp
init_cnt += 1
def return_ids(self):
return self.all_ids
def get_batch_params(self, batch):
""" get_batch_params """
batch = defaultdict(lambda: None, batch)
input_ids = batch.get('input_ids', None)
position_ids = batch.get('position_ids', None)
img_feat = batch['img_feat'] # self.bs, 10,d 2048
img_pos_feat = batch['img_pos_feat'] # self.bs, 10, 7
audio_feat = batch['audio_feat'] # self.bs, 10, 512
audio_pos_ids = batch['audio_pos_ids'] # 1, 10
# attention_mask: 32 * 191
attention_mask = batch['attn_masks']
# gather_index 32 * 191
gather_index = batch['gather_index']
txt_labels = batch['txt_labels']
txt_mask = batch['txt_mask']
txt_label_mask = batch['txt_label_mask']
img_mask_tgt = batch['img_mask_tgt'] # self.bs, 72
img_mask_tgt_mask = batch['img_mask_tgt_mask'] # self.bs*2, 2
img_masks = batch['img_masks'] # self.bs, 10
mrc_label_target = batch['label_targets'] # self.bs*2, 1
audio_mask_tgt_mask = batch['audio_mask_tgt_mask']
audio_masks = batch['audio_masks']
mrfr_feat_target = batch.get('mrfr_feat_target', None)
mafr_feat_target = batch.get('mafr_feat_target', None)
itm_target = batch.get('targets', None)
ma_neg_index = batch.get('ma_neg_index', None)
ma_neg_sample = batch.get('ma_neg_sample', None)
mr_neg_index = batch.get('mr_neg_index', None)
mr_neg_sample = batch.get('mr_neg_sample', None)
txt_gts = batch.get('txt_gts', None)
txt_masks = batch.get('txt_masks', None)
img_token_gts = batch.get('img_token_gts', None)
img_token_masks = batch.get('img_token_masks', None)
return (input_ids, position_ids, img_feat, img_pos_feat, audio_feat,
audio_pos_ids, attention_mask, gather_index, txt_labels, txt_mask,
txt_label_mask, img_mask_tgt, img_mask_tgt_mask, img_masks, mrc_label_target,
mrfr_feat_target, audio_mask_tgt_mask, audio_masks, mafr_feat_target, itm_target,
ma_neg_index, ma_neg_sample, mr_neg_index, mr_neg_sample, txt_gts,
txt_masks, img_token_gts, img_token_masks)
def get_batch_check(self, batch, input_ids, position_ids, audio_feat,
audio_pos_ids, attention_mask, txt_labels, txt_mask,
txt_label_mask, img_mask_tgt, img_mask_tgt_mask, img_masks, mrc_label_target,
mrfr_feat_target, audio_mask_tgt_mask, audio_masks, mafr_feat_target, itm_target,
ma_neg_index, ma_neg_sample, mr_neg_index, mr_neg_sample, txt_gts,
txt_masks, img_token_gts, img_token_masks):
""" get_batch_check """
ids = batch.get('ids', None)
if ids is not None:
self.all_ids = self.all_ids + ids
self.bs = attention_mask.shape[0]
# text
if input_ids is None:
input_ids = np.zeros((self.bs, 30)).astype(np.int32)
if position_ids is None:
position_ids = np.zeros((1, 30)).astype(np.int32)
if txt_labels is None:
txt_labels = np.zeros((self.bs, 30)).astype(np.int32)
if txt_mask is None:
txt_mask = np.zeros((self.bs * 2, 5)).astype(np.int32)
if txt_label_mask is None:
txt_label_mask = np.zeros(self.bs * 2).astype(np.int32)
# image
if img_mask_tgt is None:
img_mask_tgt = np.zeros((self.bs, 90)).astype(np.bool_)
if img_mask_tgt_mask is None:
img_mask_tgt_mask = np.zeros((self.bs * 2, 5)).astype(np.int32)
if img_masks is None:
img_masks = np.zeros((self.bs, 30)).astype(np.bool_)
if mrc_label_target is None:
mrc_label_target = np.zeros((self.bs * 2, 1)).astype(np.float32)
# audio
if audio_feat is None:
            audio_feat = np.zeros((self.bs, 30, 1024)).astype(np.float32)  # self.bs is taken from attention_mask.shape[0] above
if audio_pos_ids is None:
audio_pos_ids = np.zeros((1, 30)).astype(np.int32)
if mrfr_feat_target is None:
mrfr_feat_target = np.zeros((self.bs * 2, 2048)).astype(np.float32)
if audio_mask_tgt_mask is None:
audio_mask_tgt_mask = np.zeros((self.bs * 2, 5)).astype(np.int32)
if audio_masks is None:
audio_masks = np.zeros((self.bs, 30)).astype(np.bool_)
if mafr_feat_target is None:
mafr_feat_target = np.zeros((self.bs * 2, 1024)).astype(np.float32)
if itm_target is None:
itm_target = np.zeros((self.bs,)).astype(np.int32)
if ma_neg_index is None:
ma_neg_index = np.zeros((self.bs * 2, 1)).astype(np.int32)
if ma_neg_sample is None:
ma_neg_sample = np.zeros((self.bs * 2, 30, 1024)).astype(np.float32)
if mr_neg_index is None:
mr_neg_index = np.zeros((self.bs * 2, 1)).astype(np.int32)
if mr_neg_sample is None:
mr_neg_sample = np.zeros((self.bs * 2, 30, 2048)).astype(np.float32)
if txt_gts is None:
txt_gts = np.zeros((self.bs, 90)).astype(np.int32)
if txt_masks is None:
txt_masks = np.ones((self.bs, 90)).astype(np.float32)
if img_token_gts is None:
            img_token_gts = np.zeros((self.bs, 64)).astype(np.int32)
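        # Hypothetical continuation (the source file is truncated here): pad the last
        # optional field and return the batch in the same order as get_batch_params().
        if img_token_masks is None:
            img_token_masks = np.ones((self.bs, 64)).astype(np.float32)
        return (input_ids, position_ids, audio_feat, audio_pos_ids, attention_mask,
                txt_labels, txt_mask, txt_label_mask, img_mask_tgt, img_mask_tgt_mask,
                img_masks, mrc_label_target, mrfr_feat_target, audio_mask_tgt_mask,
                audio_masks, mafr_feat_target, itm_target, ma_neg_index, ma_neg_sample,
                mr_neg_index, mr_neg_sample, txt_gts, txt_masks, img_token_gts,
                img_token_masks)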
import numpy as np
from contextlib import contextmanager
from pytest import fixture, raises, mark
from unittest.mock import patch
import beyond.io.ccsds as ccsds
from beyond.dates import Date, timedelta
from beyond.io.tle import Tle
from beyond.propagators.keplernum import KeplerNum
from beyond.propagators.soi import SoINumerical
from beyond.env.solarsystem import get_body
from beyond.propagators.listeners import LightListener, NodeListener, find_event, ApsideListener
from beyond.orbits.man import ImpulsiveMan, KeplerianImpulsiveMan, ContinuousMan, KeplerianContinuousMan
import beyond.env.jpl as jpl
@fixture
def orbit_kepler(iss_tle):
orbit = iss_tle.orbit()
orbit.propagator = KeplerNum(
timedelta(seconds=60),
bodies=get_body('Earth')
)
return orbit
@fixture
def molniya_kepler(molniya_tle):
molniya = molniya_tle.orbit()
molniya.propagator = KeplerNum(
timedelta(seconds=120),
bodies=get_body('Earth')
)
return molniya
@contextmanager
def mock_step(orb):
with patch('beyond.propagators.keplernum.KeplerNum._make_step', wraps=orb.propagator._make_step) as mock:
yield mock
def count_steps(td, step, inclusive=True):
"""Count how many steps it take to travel td
Args:
td (timedelta)
step (timedelta)
Return:
int
"""
inclusive = 1 if inclusive else 0
return abs(td) // step + inclusive
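# Example: count_steps(timedelta(seconds=652), timedelta(seconds=60)) == 11,
# i.e. 10 full 60-second steps plus the inclusive endpoint (see test_duty_cycle below).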
def plot_delta_a(dates, altitude, eccentricity=None):
import matplotlib.pyplot as plt
fig = plt.figure()
g1 = fig.add_subplot(111)
p1, = g1.plot(dates, altitude, label="altitude", color="orange")
g1.set_ylabel("Altitude (m)")
g1.yaxis.label.set_color(p1.get_color())
g1.grid(ls=":")
if eccentricity:
g2 = g1.twinx()
p2, = g2.plot(dates, eccentricity, label="eccentricity")
g2.set_ylabel("Eccentricity")
g2.yaxis.label.set_color(p2.get_color())
g2.set_yscale('log')
plt.tight_layout()
plt.show()
def test_propagate_rk4(orbit_kepler):
orbit_kepler.propagator.method = KeplerNum.RK4
assert orbit_kepler.date == Date(2018, 5, 4, 13, 20, 47, 630976)
# simple propagation with a Date object
orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(minutes=121, seconds=12))
assert orb2.date == Date(2018, 5, 4, 15, 21, 59, 630976)
assert orb2.propagator.orbit is None # brand new propagator
# simple propagation with a timedelta object
orb3 = orb2.propagate(timedelta(minutes=12, seconds=5))
# Check if the propagator.orbit is initializd for orb2
# and not yet initialized for orb3
assert orb3.date == Date(2018, 5, 4, 15, 34, 4, 630976)
assert orb2.propagator.orbit is not None
assert orb3.propagator.orbit is None
assert np.allclose(
orb3,
[-2267347.5906591383, 3865612.1569156954, -5093932.5567979375, -5238.634675262262, -5326.282920539333, -1708.6895889357945]
)
# simple propagation with a negative step
orb4 = orb3.propagate(timedelta(minutes=-15))
assert orb4.date == orb3.date - timedelta(minutes=15)
def test_micro_step(orbit_kepler):
with mock_step(orbit_kepler) as mock:
# Propagation with micro-step (< to the Kepler propagator step size)
orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(seconds=20))
assert orb2.date == orbit_kepler.date + timedelta(seconds=20)
assert mock.call_count == 7
with mock_step(orbit_kepler) as mock:
# negative micro-step
orb2 = orbit_kepler.propagate(orbit_kepler.date - timedelta(seconds=20))
assert orb2.date == orbit_kepler.date - timedelta(seconds=20)
assert mock.call_count == 7
def test_propagate_euler(orbit_kepler):
orbit_kepler.propagator.method = KeplerNum.EULER
assert orbit_kepler.date == Date(2018, 5, 4, 13, 20, 47, 630976)
orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(minutes=121, seconds=12))
assert orb2.date == Date(2018, 5, 4, 15, 21, 59, 630976)
assert orb2.propagator.orbit is None # brand new propagator
orb3 = orb2.propagate(timedelta(minutes=12, seconds=5))
assert orb3.date == Date(2018, 5, 4, 15, 34, 4, 630976)
assert orb2.propagator.orbit is not None
assert orb3.propagator.orbit is None
assert np.allclose(
np.array(orb3),
[-880124.9759610161, -10453560.873778934, 6457874.859314914, 4109.877000752121, 1881.4035807734163, 2961.5286009903316]
)
def test_propagate_dopri(orbit_kepler):
orbit_kepler.propagator.method = KeplerNum.DOPRI54
assert orbit_kepler.date == Date(2018, 5, 4, 13, 20, 47, 630976)
orb2 = orbit_kepler.propagate(orbit_kepler.date + timedelta(minutes=121, seconds=12))
assert orb2.date == Date(2018, 5, 4, 15, 21, 59, 630976)
assert orb2.propagator.orbit is None # brand new propagator
orb3 = orb2.propagate(timedelta(minutes=12, seconds=5))
assert orb3.date == Date(2018, 5, 4, 15, 34, 4, 630976)
assert orb2.propagator.orbit is not None # This propagator has been used
assert orb3.propagator.orbit is None # This one not
assert np.allclose(
np.array(orb3),
[-2267319.8725340427, 3865646.423538732, -5093927.810461366, -5238.647479926973, -5326.249640066392, -1708.7264386468821]
)
def test_iter(orbit_kepler):
data = [p for p in orbit_kepler.iter(stop=timedelta(minutes=120))]
assert len(data) == 121
assert min(data, key=lambda x: x.date).date == orbit_kepler.date
assert max(data, key=lambda x: x.date).date == orbit_kepler.date + timedelta(minutes=120)
for p in data:
# Check that no created Orbit object has an initialized propagator
# i.e. that the propagation is done only by the propagator of orbit_kepler
# This happened during development when dealing with listeners and should not happen
# again due to the use of Ephem inside KeplerNum
assert p.propagator.orbit is None
data2 = [p for p in orbit_kepler.iter(stop=timedelta(minutes=120))]
assert data[0].date == data2[0].date
assert all(data[0] == data2[0])
assert data[0] is not data2[0]
# TODO Test retropolation then extrapolation
# same but with step interpolation
def test_iter_on_dates(orbit_kepler):
# Generate a free step ephemeris
start = orbit_kepler.date
stop = timedelta(hours=3)
step = timedelta(seconds=10)
drange = Date.range(start, stop, step, inclusive=True)
ephem = orbit_kepler.ephem(dates=drange)
assert ephem.start == start
assert ephem.stop == start + stop
assert ephem[1].date - ephem[0].date == step
for p in ephem:
assert p.propagator.orbit is None
def test_duty_cycle(orbit_kepler):
with mock_step(orbit_kepler) as mock:
date = Date(2018, 5, 4, 15)
orbit_kepler.propagate(date)
assert mock.call_count == count_steps(orbit_kepler.date - date, orbit_kepler.propagator.step)
assert mock.call_count == 100
with mock_step(orbit_kepler) as mock:
date = orbit_kepler.date - timedelta(seconds=652)
orbit_kepler.propagate(date)
assert mock.call_count == count_steps(orbit_kepler.date - date, orbit_kepler.propagator.step)
assert mock.call_count == 11
with mock_step(orbit_kepler) as mock:
start = Date(2018, 5, 4, 13)
stop = start + timedelta(minutes=90)
data = []
for p in orbit_kepler.iter(start=start, stop=stop):
data.append(p)
assert len(data) == 91
assert data[0].date == start
assert data[-1].date == stop
assert mock.call_count == (
count_steps(orbit_kepler.date - start, orbit_kepler.propagator.step)
+ count_steps(stop - start, orbit_kepler.propagator.step, False)
)
# assert mock.call_count == 125
def test_listener(orbit_kepler):
with mock_step(orbit_kepler) as mock:
start = Date(2018, 5, 4, 13)
stop = start + timedelta(minutes=90)
data = []
for p in orbit_kepler.iter(start=start, stop=stop, listeners=LightListener()):
data.append(p)
assert len(data) == 93
assert mock.call_count == (
count_steps(orbit_kepler.date - start, orbit_kepler.propagator.step)
+ count_steps(stop - start, orbit_kepler.propagator.step, False)
)
# assert mock.call_count == 111
events = [x for x in data if x.event]
assert len(events) == 2
assert events[0].date == Date(2018, 5, 4, 13, 8, 38, 869128)
assert events[0].event.info == "Umbra exit"
assert events[1].date == Date(2018, 5, 4, 14, 5, 21, 256924)
assert events[1].event.info == "Umbra entry"
with mock_step(orbit_kepler) as mock:
start = Date(2018, 5, 4, 13)
stop = start + timedelta(minutes=90)
data = []
for p in orbit_kepler.iter(start=start, stop=stop, listeners=ApsideListener()):
data.append(p)
assert len(data) == 93
assert mock.call_count == (
count_steps(orbit_kepler.date - start, orbit_kepler.propagator.step)
+ count_steps(stop - start, orbit_kepler.propagator.step, False)
)
# assert mock.call_count == 125
events = [x for x in data if x.event]
assert len(events) == 2
assert str(events[0].date) == "2018-05-04T13:08:30.765143 UTC"
assert events[0].event.info == "Periapsis"
assert str(events[1].date) == "2018-05-04T13:54:50.178229 UTC"
assert events[1].event.info == "Apoapsis"
def test_man_impulsive(molniya_kepler):
# Test of a circularisation of a molniya orbit
# At apogee, this is roughly 1400 m/s
with raises(ValueError):
ImpulsiveMan(Date(2018, 9, 20, 13, 48, 21, 763091), (28, 0, 0, 0))
apo = find_event(molniya_kepler.iter(stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)
man = ImpulsiveMan(apo.date, (1427., 0, 0), frame="TNW")
# Check on the sensitivity of the find_event function
apo2 = find_event(molniya_kepler.iter(start=molniya_kepler.date + timedelta(seconds=243, minutes=5), stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)
assert abs(apo.date - apo2.date) < timedelta(seconds=1)
molniya_kepler.maneuvers = man
altitude = []
eccentricity = []
dates = []
for p in molniya_kepler.iter(stop=timedelta(hours=36)):
altitude.append(p.copy(form='spherical').r - p.frame.center.body.r)
eccentricity.append(p.copy(form="keplerian").e)
dates.append(p.date.datetime)
# plot_delta_a(dates, altitude, eccentricity)
# retrieve the index of the first point after the maneuver
man_idx = (np.array(dates) > man.date.datetime).argmax()
alt_before = np.mean(altitude[:man_idx])
alt_after = np.mean(altitude[man_idx:])
ecc_before = np.mean(eccentricity[:man_idx])
ecc_after = np.mean(eccentricity[man_idx:])
assert abs(ecc_before - 6.47e-1) < 2e-4
assert abs(ecc_after - 3e-3) < 2e-4
# assert abs(ecc_after - 6.57e-4) < 1e-6
assert str(man.date) == "2018-05-03T16:29:23.246451 UTC"
# 8'000 km increment in altitude
assert 8000000 < alt_after - alt_before < 8200000
def test_man_delta_a(molniya_kepler):
apo = find_event(molniya_kepler.iter(stop=timedelta(hours=26), listeners=ApsideListener()), 'Apoapsis', offset=1)
man1 = KeplerianImpulsiveMan(apo.date, da=5900000)
molniya_kepler.maneuvers = man1
altitude = []
dates = []
for p in molniya_kepler.iter(stop=timedelta(hours=26)):
altitude.append(p.copy(form='spherical').r - p.frame.center.body.r)
dates.append(p.date.datetime)
# plot_delta_a(dates, altitude)
man_idx = (np.array(dates) > man1.date.datetime).argmax()
before = np.mean(altitude[:man_idx])
after = np.mean(altitude[man_idx:])
    assert int(np.linalg.norm(man1._dv))  # exact expected delta-v value truncated in the source
# -*- coding: utf-8 -*-
"""
Model evaluation
Authors: dongrenguang(<EMAIL>)
Date: 2021/10/24
"""
import abc
import numpy as np
from ..core import Node
class Metric(Node):
"""评估指标算子抽象基类
"""
def __init__(self, *parents, **kargs):
kargs['need_save'] = kargs.get('need_save', False)
Node.__init__(self, *parents, **kargs)
self.init()
def reset(self):
self.reset_value()
self.init()
@abc.abstractmethod
def init(self):
        # Initialize the node; implemented by concrete subclasses
pass
@staticmethod
def prob_to_label(prob, threshold=0.5):
"""将预估值转化为标签
"""
if prob.shape[0] > 1:
            # Multi-class: the predicted label is the class with the highest probability
labels = np.zeros((prob.shape[0], 1))
            labels[np.argmax(prob, axis=0)] = 1
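        else:
            # Binary case (hypothetical continuation, the source file is truncated here):
            # threshold the predicted probability.
            labels = np.where(prob < threshold, 0, 1)
        return labels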
import numpy as np
def cube_gradient(cube, idx_min, steps):
grad = np.array(np.gradient(cube, *steps))
return grad[:, idx_min[0], idx_min[1], idx_min[2]]
def gradient_norm(cube, idx_min, steps):
window = 3
window_slices = []
for i_min in idx_min:
x_slice = slice(i_min - window // 2, i_min + window // 2 + 1)
window_slices.append(x_slice)
gradient = cube_gradient(cube[tuple(window_slices)], idx_min, steps)
return np.linalg.norm(gradient)
def cube_hessian(cube, idx_min, steps):
window = 5
window_slices = []
for i_min in idx_min:
x_slice = slice(i_min - window // 2, i_min + window // 2 + 1)
window_slices.append(x_slice)
hessian = np.zeros((cube.ndim, cube.ndim) + cube[idx_min].shape)
for i, grad_i in enumerate(np.gradient(cube, *steps)):
"""This python module implements the class ``ClassifierComparision``
which can be used to compare the accuracy results of two different
classification results (e.g. results from different fruits.Fruit
objects).
This file can also be used as a scripted invoked from the command line.
You get all available arguments with
>>> python configs_compare.py -h
The module can also be used without any dependencies to fruits.
"""
import os
import argparse
from typing import List, Union, Tuple
import networkx as nx
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from fruitalyser import _get_color
DEFAULT_COMPARISION_COLUMN = "FRUITS Acc"
class ClassifierComparision:
"""Implements methods for the comparision of two classification
techniques using the information of their accuracy on different
datasets.
:param acc1: A one dimensional numpy array containing accuracy
results of one technique (the one that is expected to be
better in general) for different datasets.
:type acc1: np.ndarray
:param acc2: A one dimensional numpy array containing accuracy
results of a second technique.
:type acc2: np.ndarray
:param label1: Short label that describes the first technique.
:type label1: str
:param label2: Short label that describes the second technique.
:type label2: str
"""
def __init__(self,
accuracies: np.ndarray,
labels: List[str]):
self._ndatasets = accuracies.shape[0]
self._nclassifiers = accuracies.shape[1]
if len(labels) != self._nclassifiers:
raise ValueError("Lengths of accuracies and labels differ")
self._accuracies = accuracies.copy()
maximum = self._accuracies.max()
if maximum > 1.0:
self._accuracies /= maximum
self._labels = labels
def scatterplot(self,
indices: Union[List[Tuple[int, int]], None] = None,
opacity: Union[List[float], None] = None) -> tuple:
"""Creates a 2D scatter plot for each pair of the given
accuracy results.
:param indices: List of integer pairs that define which methods
to compare. If ``None`` is given, then all pairs will be
plotted.
:type indices: Union[List[Tuple[int]], None], optional
:param opacity: List of floats that has the same length as
the original accuracy results. The points in the scatter
plot will be colored based on the values in this list.,
defaults to None
:type opacity: Union[List[float], None], optional
:returns: Figure and axis that you get from ``plt.subplots``.
:rtype: tuple
"""
colors = np.zeros((self._ndatasets, 4))
colors[:, :3] = _get_color(0)
colors[:, 3] = opacity
if indices is None:
indices = [(i, j)
for i in range(self._nclassifiers)
for j in range(self._nclassifiers)]
fig, axs = plt.subplots(self._nclassifiers, self._nclassifiers)
else:
fig, axs = plt.subplots(len(indices), 1)
if len(indices) == 1:
axs = np.array([axs], dtype=object)
axs = axs.reshape((len(indices), 1))
c = 0
for i in range(axs.shape[0]):
for j in range(axs.shape[1]):
ii, jj = indices[c]
axs[i][j].axis('square')
axs[i][j].set_xlim([0, 1])
axs[i][j].set_ylim([0, 1])
if ii == jj:
weights = np.ones_like(self._accuracies[:, ii])
weights /= self._ndatasets
axs[i][j].hist(
self._accuracies[:, ii],
weights=weights,
)
else:
axs[i][j].scatter(
self._accuracies[:, jj], self._accuracies[:, ii],
c=opacity,
cmap="copper_r",
)
axs[i][j].plot([0, 1], [0, 1],
transform=axs[i][j].transAxes,
color=_get_color(1), ls="--")
axs[i][j].plot([0.05, 1], [0, 0.95],
transform=axs[i][j].transAxes,
color=_get_color(1)+(0.3,), ls="--")
axs[i][j].plot([0, 0.95], [0.05, 1],
transform=axs[i][j].transAxes,
color=_get_color(1)+(0.3,), ls="--")
meanii = self._accuracies[:, ii].mean()
meanjj = self._accuracies[:, jj].mean()
axs[i][j].axhline(meanii, xmin=0, xmax=meanii,
color=_get_color(3)+(0.5,), ls="--")
axs[i][j].axvline(meanjj, ymin=0, ymax=meanjj,
color=_get_color(3)+(0.5,), ls="--")
axs[i][j].text(0.02, 0.98, self._labels[ii],
size="large", ha="left", va="top")
axs[i][j].text(0.98, 0.02, self._labels[jj],
size="large", ha="right", va="bottom")
c += 1
return fig, axs
def test_greater(self, i: int, j: int):
"""Tests whether the null-hypothesis of technique at index ``i``
being less or equally good compared to method ``j`` can be
rejected by performing an one-sided paired Wilcoxon signed-rank
test.
:type i: int
:type j: int
:returns: Value of the test function and p-value of the test.
:rtype: tuple
"""
stat, p = sp.stats.wilcoxon(self._accuracies[:, i],
self._accuracies[:, j],
alternative="greater")
return stat, p
def critical_difference_diagram(self, alpha: float = 0.05):
"""Draws and returns a figure of a critical difference diagram
based on the accuracies given to the class object.
This type of plot was described in the paper
'Statistical Comparisons of Classifiers over Multiple Data Sets'
by <NAME>, 2006.
:param alpha: Significance value used for doing pairwise
Wilcoxon signed-rank tests., defaults to 0.05
:type alpha: float, optional
:returns: Figure and axis that matches to the return types of
``plt.subplots(1, 1)``.
:rtype: tuple
"""
p = np.zeros((int(self._nclassifiers * (self._nclassifiers-1) / 2),),
dtype=np.float32)
c = 0
for i in range(self._nclassifiers - 1):
for j in range(i+1, self._nclassifiers):
p[c] = sp.stats.wilcoxon(self._accuracies[:, i],
self._accuracies[:, j],
zero_method='pratt')[1]
c += 1
p_order = np.argsort(p)
holm_bonferroni = alpha / np.arange(p.shape[0], 0, -1)
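# Hedged usage sketch (assumption, not part of the original module): given accuracy
# results of two methods on the same datasets, the class could be used roughly as
#   accs = np.stack([acc_fruits, acc_baseline], axis=1)   # shape (n_datasets, 2), hypothetical arrays
#   comp = ClassifierComparision(accs, ["FRUITS", "Baseline"])
#   fig, axs = comp.scatterplot(indices=[(0, 1)], opacity=[1.0] * accs.shape[0])
#   stat, p = comp.test_greater(0, 1)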
import numpy as np
import cv2
from util.global_variables import GlobalVar
def get_undistorted_image(img):
# Use cv2.calibrateCamera() and cv2.undistort()
# Convert to grayscale
grayscaleimage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
obj_points, img_points, = GlobalVar().ret_calib_points()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, grayscaleimage.shape[::-1], None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
def get_threshold_binary_image(img):
gradx = apply_abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(20, 100))
grady = apply_abs_sobel_thresh(img, orient='y', sobel_kernel=3, thresh=(20, 100))
mag_binary = apply_mag_thresh(img, sobel_kernel=9, mag_thresh=(30, 100))
dir_binary = apply_dir_threshold(img, sobel_kernel=15, thresh=(0.7, 1.3))
color_transform_binary = apply_color_transform(img, s_thresh=(170, 255), sx_thresh=(20, 100))
combined = np.zeros_like(color_transform_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) | (color_transform_binary == 1)] = 1
return combined
def apply_abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if (orient == 'x'):
sobelderivative = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
elif orient == 'y':
sobelderivative = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
abs_sobel_dt = np.absolute(sobelderivative)
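# Hedged completion (assumption, not in the original source): scale to 8-bit,
# apply the threshold band and return the binary mask, following the usual recipe.
scaled_sobel = np.uint8(255 * abs_sobel_dt / np.max(abs_sobel_dt))
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return binary_output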
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import argparse
from torch.autograd import Variable
import torch.utils.data as data
#from data import v2, v1, AnnotationTransform, VOCDetection, detection_collate, VOCroot, VOC_CLASSES
from data import FISHdetection, detection_collate, v2, v1, BaseTransform
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from ssd_multiphase_custom_512_group import build_ssd
import numpy as np
import time
import h5py
from sklearn.model_selection import train_test_split, KFold
import copy
from test_ap import test_net
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')
parser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer')
parser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model')
parser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')
parser.add_argument('--batch_size', default=16, type=int, help='Batch size for training')
parser.add_argument('--resume', default=None, type=str, help='Resume from checkpoint')
parser.add_argument('--num_workers', default=1, type=int, help='Number of workers used in dataloading')
# parser.add_argument('--iterations', default=120000, type=int, help='Number of training iterations')
parser.add_argument('--start_iter', default=0, type=int, help='Begin counting iterations starting from this value (should be used with resume)')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=5e-4, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float, help='Gamma update for SGD')
parser.add_argument('--log_iters', default=True, type=bool, help='Print the loss at each iteration')
parser.add_argument('--visdom', default=True, type=str2bool, help='Use visdom to for loss visualization')
parser.add_argument('--send_images_to_visdom', type=str2bool, default=False, help='Sample a random image from each 10th batch, send it to visdom after augmentations step')
parser.add_argument('--save_folder', default='weights/', help='Location to save checkpoint models')
# parser.add_argument('--voc_root', default=VOCroot, help='Location of VOC root directory')
args = parser.parse_args()
if args.cuda and torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
#cfg = (v1, v2)[args.version == 'v2']
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
"""#################### Hyperparameters ####################"""
ssd_dim = 512
# current CT dataset has mean pixel val of 33.5
means = (34, 34, 34)
num_classes = 2 # lesion or background
batch_size = args.batch_size
#accum_batch_size = 32
#iter_size = accum_batch_size / batch_size
max_iter = 10001
weight_decay = 0.0005
stepvalues = (5000, 8000)
gamma = 0.1
momentum = 0.9
# use batchnorm for vgg & extras
batch_norm = True
# OHNM (online hard neg mining) ratio (pos:neg = 1:x)
ohnm_neg_ratio = 1
# data augmentation hyperparams
gt_pixel_jitter = 0.01
expand_ratio = 1.5
# CV hyperparams
cross_validation = 5
# ap hyperparam
confidence_threshold = 0.01
# string for output & weight name logging
output_string = 'ssd512_group_vanilla_BN_10CV'
"""#########################################################"""
if args.visdom:
import visdom
viz = visdom.Visdom()
""""########## Data Loading & dimension matching ##########"""
# load custom CT dataset
datapath = '/home/vision/tkdrlf9202/Datasets/liver_lesion_aligned/lesion_dataset_4phase_aligned.h5'
train_sets = [('liver_lesion')]
def load_lesion_dataset(data_path):
"""
loads custom liver dataset
if preprocessed h5 data exists, load it
if not, load and preprocess raw liver dataset
:param data_path:
:return: flattened CT and mask data
"""
# check if the preprocessed dataset exists
if os.path.isfile(data_path):
# load the preprocessed dataset dump
print('loading lesion dataset...')
with h5py.File(data_path, 'r') as dataset_h5:
group_ct = dataset_h5['ct']
group_coordinate = dataset_h5['coordinate']
ct = [i[:] for i in group_ct.values()]
coordinate = [i[:] for i in group_coordinate.values()]
dataset_h5.close()
return ct, coordinate
ct, coord = load_lesion_dataset(datapath)
# ct: [subjects, sample, phase, channel, 512, 512]
# coord: [subjects, sample, phase, channel, 5], [x_min, y_min, x_max, y_max, 0 (lesion class label)] format
# make channels last & 0~255 uint8 image
for idx in range(len(ct)):
ct[idx] = np.transpose(ct[idx] * 255, [0, 1, 3, 4, 2]).astype(dtype=np.uint8)
# use only coordinate from the middle slice, ditch the upper & lower ones
coord[idx] = coord[idx][:, :, 1, :]
""" use CV instead
# split train & valid set, subject-level (without shuffle)
ct_train, ct_valid, coord_ssd_train, coord_ssd_valid = train_test_split(ct, coord, test_size=0.1, shuffle=False)
"""
# 5-fold CV
kf = KFold(n_splits=cross_validation)
kf.get_n_splits(ct, coord)
# flatten the subject & sample dimension for each sets by stacking
ct_train = []
ct_valid = []
coord_ssd_train = []
coord_ssd_valid = []
for train_index, valid_index in kf.split(ct):
ct_train_part = [ct[x] for ind, x in enumerate(train_index)]
ct_valid_part = [ct[x] for ind, x in enumerate(valid_index)]
coord_train_part = [coord[x] for ind, x in enumerate(train_index)]
coord_valid_part = [coord[x] for ind, x in enumerate(valid_index)]
ct_train.append(np.vstack(ct_train_part))
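# Hedged completion (assumption, not in the original source): the remaining lists are
# stacked the same way for each fold.
ct_valid.append(np.vstack(ct_valid_part))
coord_ssd_train.append(np.vstack(coord_train_part))
coord_ssd_valid.append(np.vstack(coord_valid_part))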
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: <NAME>
## Computer Vision Center (CVC). Universitat Autonoma de Barcelona
## Email: <EMAIL>
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from datasets import omniglot
import torchvision.transforms as transforms
from PIL import Image
import os.path
import json
import math
from numpy import array
import numpy as np
#moved inside constructor
#np.random.seed(2191) # for reproducibility
# LAMBDA FUNCTIONS
filenameToPILImage = lambda x: Image.open(x).convert('L')
PiLImageResize = lambda x: x.resize((28,28))
np_reshape = lambda x: np.reshape(x, (28, 28, 1))
def base_classes_file_data( base_classes_file ):
return array( json.load( open( base_classes_file ) ) )
class OmniglotNShotDataset():
def __init__(self, dataroot, batch_size = 100, classes_per_set=10, samples_per_class=1, is_use_sample_data = True, input_file="", input_labels_file="", total_input_files=-1, is_evaluation_only = False, evaluation_input_file = "", evaluation_labels_file = "", evaluate_classes = 1, is_eval_with_train_data = 0, negative_test_offset = 0, is_apply_pca_first = 0, cache_samples_for_evaluation = 100, is_run_time_predictions = False, pca_components = 900, is_evaluation_res_in_obj = False, total_base_classes = 0, is_visualize_data = False, is_run_validation_batch = True, is_compare = False, is_load_test_record = False, test_record_class = -1, test_record_index = -1, is_debug = True, is_switch_dim = False, is_batch_persistancy = False, is_load_file_data_only=False, test_batch_records=20):
self.is_debug = is_debug
if is_evaluation_only == False:
np.random.seed(2191) # for reproducibility
else:
#for variational testing
np.random.seed( np.random.randint(0, 1000) )
if is_use_sample_data:
if not os.path.isfile(os.path.join(dataroot,'data.npy')):
self.x = omniglot.OMNIGLOT(dataroot, download=True,
transform=transforms.Compose([filenameToPILImage,
PiLImageResize,
np_reshape]))
"""
# Convert to the format of AntreasAntoniou. Format [nClasses,nCharacters,28,28,1]
"""
temp = dict()
for (img, label) in self.x:
if label in temp:
temp[label].append(img)
else:
temp[label]=[img]
self.x = [] # Free memory
for classes in temp.keys():
self.x.append(np.array(temp[ list(temp.keys())[classes]]))
self.x = np.array(self.x)
temp = [] # Free memory
np.save(os.path.join(dataroot,'data.npy'),self.x)
else:
self.x = np.load(os.path.join(dataroot,'data.npy'))
else:
self.x = []
self.x_to_be_predicted = []
self.x_to_be_predicted_cls_indexes = {}
self.prediction_classes = 9
self.total_base_classes = total_base_classes #56
self.tvt_records = 500 #70 # 25 #11 #3 #19
self.tvt_records_fall_short_clss = {}
self.re_records = 0 #2 #2 #10
self.choice_replace = True #necessary when number of samples are small
self.is_batch_persistancy = is_batch_persistancy
base_classes_file = input_file+"_base_classes.json"
self.evaluate_classes = evaluate_classes
self.is_eval_with_train_data = True if is_eval_with_train_data == 1 else False
self.negative_test_offset = negative_test_offset
self.is_run_time_predictions = is_run_time_predictions
self.is_evaluation_res_in_obj = is_evaluation_res_in_obj
self.test_batch_records = test_batch_records
is_disable_heavy_functions_temporarily = True
main_lsize = 1
if not os.path.exists( base_classes_file ) and is_load_test_record:
main_lsize = 2
#
if is_evaluation_only == False or not os.path.exists( base_classes_file ) or is_load_test_record:
if is_debug:
print( "(!) Merging inputs, should only be executed in training mode." )
input = []
input_labels = []
if is_debug:
print("total_input_files")
print(total_input_files)
for i in range(0, total_input_files):
if is_debug:
print("total_input_files i " + str(i))
if i == 0:
input = array( json.load( open( input_file.replace('{i}', str(i)) ) ) )
input_labels = array( json.load( open( input_labels_file.replace('{i}', str(i)) ) ) )
else:
input = np.concatenate( ( input, array( json.load( open( input_file.replace('{i}', str(i)) ) ) ) ), axis=0 )
input_labels = np.concatenate( ( input_labels, array( json.load( open( input_labels_file.replace('{i}', str(i)) ) ) ) ), axis=0 )
temp = dict()
temp_to_be_predicted = dict()
sizei = len(input)
if is_debug:
print("sizei")
print(sizei)
test_record_index_cnt = -1
for li in range(0, main_lsize):
for i in np.arange(sizei):
#if is_evaluation_only == True and input_labels[i] >= self.prediction_classes:
# continue
if input_labels[i] >= self.total_base_classes:
continue
if is_load_test_record and (main_lsize == 1 or li == 1):
if input_labels[i] == test_record_class:
test_record_index_cnt = test_record_index_cnt + 1
if test_record_index_cnt == test_record_index:
self.evaluation = np.zeros( ( self.total_base_classes, self.tvt_records, input[i].shape[0], input[i].shape[1], 1 ) )
self.evaluation[:,:,:,:,:] = input[i][:,:,np.newaxis]
break
else:
if input_labels[i] in temp:
if len( temp[input_labels[i]] ) >= self.tvt_records: #only 20 samples per class
if self.re_records > 0 and is_evaluation_only == False and (input_labels[i] < self.total_base_classes or np.mod( input_labels[i] - self.total_base_classes, 30 ) == 0 or np.mod( input_labels[i] - (self.total_base_classes+1), 30 ) == 0): #True or False and (True or input_labels[i] == 6):
lbl_val = input_labels[i]
if input_labels[i] >= self.total_base_classes and np.mod( input_labels[i] - self.total_base_classes, 30 ) == 0:
lbl_val = self.total_base_classes + int( (input_labels[i] - self.total_base_classes) / 30 )
if input_labels[i] >= self.total_base_classes and np.mod( input_labels[i] - (self.total_base_classes+1), 30 ) == 0:
lbl_val = (self.total_base_classes*2) + int( (input_labels[i] - (self.total_base_classes+1)) / 30 )
if lbl_val in temp_to_be_predicted:
if len( temp_to_be_predicted[lbl_val] ) >= self.re_records: #only 20 samples per class
continue
temp_to_be_predicted[lbl_val].append( input[i][:,:,np.newaxis] )
else:
temp_to_be_predicted[lbl_val]=[input[i][:,:,np.newaxis]]
continue
temp[input_labels[i]].append( input[i][:,:,np.newaxis] )
else:
temp[input_labels[i]]=[input[i][:,:,np.newaxis]]
print( "temp.keys()" )
unique, counts = np.unique(input_labels, return_counts=True)
tmpdict = dict(zip(unique, counts))
print( tmpdict )
print( sorted(tmpdict.items(), key=lambda x: x[1], reverse=True) )
print( temp[0][0] )
input = [] # Free memory
input_labels = [] # Free memory
self.x = [] # Free memory
if not is_load_test_record or main_lsize == 2:
print( " tvt_records_fall_short_clss 1 ", self.tvt_records_fall_short_clss, " 1 " )
print( "temp.keys ", temp.keys() )
for classes in temp.keys():
if False:
self.x.append(np.array(temp[ list(temp.keys())[classes]]))
self.tvt_records_fall_short_clss[classes] = len(self.x[len(self.x)-1])
else:
#print( "list(temp.keys())[classes] ", list(temp.keys())[classes], classes )
self.x.append(np.array(temp[ list(temp.keys())[classes]]))
print( "classes ", classes, len(self.x[len(self.x)-1]), self.test_batch_records)
#TODO temp. below is temporary adaption to fix problem of less data for test, shouldn't be used permanantly and must train model again with less test batch record size
if len(self.x[len(self.x)-1]) - self.test_batch_records <= 0:
print( "self.x shape ", self.x[len(self.x)-1].shape )
ara = np.zeros( ( 10, self.x[len(self.x)-1].shape[1], self.x[len(self.x)-1].shape[2], self.x[len(self.x)-1].shape[3] ) )
for ari in range(0, ((len(self.x[len(self.x)-1]) - self.test_batch_records) * -1) + 10):
ara[ari, :, :, :] = self.x[len(self.x)-1][ari, :, :, :]
print( "self.x shape ", self.x[len(self.x)-1].shape, ara.shape )
self.x[len(self.x)-1] = np.concatenate( ( self.x[len(self.x)-1], ara ), axis=0 )
print( "classes ", classes, len(self.x[len(self.x)-1]), self.test_batch_records)
self.tvt_records_fall_short_clss[classes] = len(self.x[len(self.x)-1]) - self.test_batch_records #20
if len(self.x[len(self.x)-1]) < self.tvt_records:
#print( "self.x.shape ", self.x[len(self.x)-1].shape, len(self.x[len(self.x)-1]), self.x[len(self.x)-1] )
self.x[len(self.x)-1] = np.concatenate( ( self.x[len(self.x)-1], np.zeros( [ self.tvt_records - len(self.x[len(self.x)-1]), self.x[len(self.x)-1].shape[1], self.x[len(self.x)-1].shape[2], self.x[len(self.x)-1].shape[3] ] ) ), axis=0 )
#for xrecind in range(len(self.x[len(self.x)-1]), self.tvt_records):
#print( "self.x.shape ", self.x[len(self.x)-1].shape, len(self.x[len(self.x)-1]), self.x[len(self.x)-1] )
print( self.tvt_records_fall_short_clss )
self.x = np.array(self.x)
#np.save(os.path.join(dataroot,'data.npy'),self.x)
if not is_disable_heavy_functions_temporarily:
with open( base_classes_file, 'w') as outfile:
json.dump(self.x.tolist(), outfile)
if is_load_test_record:
if main_lsize == 1:
if is_debug:
print("loaded prepared base_classes_file")
self.x = array( json.load( open( base_classes_file ) ) )
else:
if is_debug:
print("Not loaded prepared base_classes_file")
for cli in range(0, self.total_base_classes):
self.tvt_records_fall_short_clss[cli] = test_record_index
if is_debug:
print(self.x.shape)
print( "loaded test record " )
print( self.evaluation.shape )
temp = [] # Free memory
if self.is_run_time_predictions:
if is_debug:
print( "temp_to_be_predicted.keys()" )
print( temp_to_be_predicted.keys() )
cls_index = 0
for classes in temp_to_be_predicted.keys():
self.x_to_be_predicted_cls_indexes[classes] = cls_index
self.x_to_be_predicted.append(np.array(temp_to_be_predicted[ list(temp_to_be_predicted.keys())[classes]]))
cls_index = cls_index + 1
self.x_to_be_predicted = np.array(self.x_to_be_predicted)
temp_to_be_predicted = [] # Free memory
#np.save(os.path.join(dataroot,'data.npy'),self.x)
with open( base_classes_file+"_x_to_be_predicted.json", 'w') as outfile:
json.dump(self.x_to_be_predicted.tolist(), outfile)
else:
if is_debug:
print("loaded prepared base_classes_file")
self.x = array( json.load( open( base_classes_file ) ) )
if is_debug:
print(self.x.shape)
if is_evaluation_only == False:
if is_debug:
print("loaded prepared x_to_be_predicted file")
self.x_to_be_predicted = array( json.load( open( base_classes_file+"_x_to_be_predicted.json" ) ) )
#
is_loaded_evaluation_file = False
if is_evaluation_only == True and is_load_test_record == False:
if not os.path.exists(evaluation_input_file.replace('{i}', str(0)) + "_prepared.json"):
input = array( json.load( open( evaluation_input_file.replace('{i}', str(0)) ) ) )
input_labels = array( json.load( open( evaluation_labels_file.replace('{i}', str(0)) ) ) )
temp = dict()
temp_to_be_predicted = dict()
sizei = len(input)
print("sizei")
print(sizei)
for i in np.arange(sizei):
if input_labels[i] in temp:
if len( temp[input_labels[i]] ) >= self.tvt_records: #only 20 samples per class
if is_evaluation_only == False and (input_labels[i] < self.total_base_classes or np.mod( input_labels[i] - self.total_base_classes, 30 ) == 0 or np.mod( input_labels[i] - (self.total_base_classes+1), 30 ) == 0): #True or False and (True or input_labels[i] == 6):
lbl_val = input_labels[i]
if input_labels[i] >= self.total_base_classes and np.mod( input_labels[i] - self.total_base_classes, 30 ) == 0:
lbl_val = self.total_base_classes + int( (input_labels[i] - self.total_base_classes) / 30 )
if input_labels[i] >= self.total_base_classes and np.mod( input_labels[i] - (self.total_base_classes+1), 30 ) == 0:
lbl_val = (self.total_base_classes*2) + int( (input_labels[i] - (self.total_base_classes+1)) / 30 )
if lbl_val in temp_to_be_predicted:
if len( temp_to_be_predicted[lbl_val] ) >= self.re_records: #only 20 samples per class
continue
temp_to_be_predicted[lbl_val].append( input[i][:,:,np.newaxis] )
else:
temp_to_be_predicted[lbl_val]=[input[i][:,:,np.newaxis]]
continue
temp[input_labels[i]].append( input[i][:,:,np.newaxis] )
else:
temp[input_labels[i]]=[input[i][:,:,np.newaxis]]
"""
print( "temp.keys()" )
#print( temp.keys() )
#for key, value in temp.items():
# if True or len(value) < 19:
# print("key " + str(key) + " len " + str(len(value)))
unique, counts = np.unique(input_labels, return_counts=True)
print( dict(zip(unique, counts)) )
print(temp.keys())
"""
input = [] # Free memory
input_labels = [] # Free memory
self.evaluation = []
for classes in temp.keys():
self.evaluation.append(np.array(temp[ list(temp.keys())[classes]]))
self.evaluation = np.array(self.evaluation)
temp = [] # Free memory
else:
print("loaded prepared evaluation_input_file")
is_loaded_evaluation_file = True
self.evaluation = array( json.load( open( evaluation_input_file.replace('{i}', str(0)) + "_prepared.json" ) ) )
"""
#TODO temp
if self.x.shape[2] >= 28 and self.x.shape[3] >= 108:
self.x[:,:,27,99,0] = 0
self.x[:,:,27,103,0] = 0
self.x[:,:,27,107,0] = 0
if is_evaluation_only == False:
if self.is_run_time_predictions:
self.x_to_be_predicted[:,:,27,99,0] = 0
self.x_to_be_predicted[:,:,27,103,0] = 0
self.x_to_be_predicted[:,:,27,107,0] = 0
else:
if self.evaluation.shape[2] >= 28 and self.evaluation.shape[3] >= 108:
self.evaluation[:,:,27,99,0] = 0
self.evaluation[:,:,27,103,0] = 0
self.evaluation[:,:,27,107,0] = 0
"""
#TODO tmp. compare
"""
print(self.x.shape)
print(self.evaluation.shape)
is_found = False
for h in range(0, self.x.shape[0]):
print("classssssssssssssssssssssssssssssssssssssssssssssssssssssssssss " + str(h))
for i in range(0, self.x.shape[1]):
#print( "x indices val " + str(self.x[h,i,27,99,0]) + " " + str(self.x[h,i,27,103,0]) + " " + str(self.x[h,i,27,107,0]) )
xt = np.copy(self.x[h,i,:,:,:])
xt[27,99,0] = 0
xt[27,103,0] = 0
xt[27,107,0] = 0
et = np.copy(self.evaluation[self.evaluate_classes,0,:,:,:])
et[27,99,0] = 0
et[27,103,0] = 0
et[27,107,0] = 0
#print( "evaluation indices val " + str(self.evaluation[self.evaluate_classes,i,27,99,0]) + " " + str(self.evaluation[self.evaluate_classes,i,27,103,0]) + " " + str(self.evaluation[self.evaluate_classes,i,27,107,0]) )
result = np.subtract( xt, et)
if (result > 1.0).sum() >= 1 or (result < -1.0).sum() >= 1:
continue
print ('the difference h ' + str(h) + ' i ' + str(i))
print (result)
if (result > 0.0).sum() == 0 and (result < 0.0).sum() == 0:
is_found = True
self.evaluate_classes = h
self.evaluation[self.evaluate_classes,:,27,99,0] = self.x[h,i,27,99,0]
self.evaluation[self.evaluate_classes,:,27,103,0] = self.x[h,i,27,103,0]
self.evaluation[self.evaluate_classes,:,27,107,0] = self.x[h,i,27,107,0]
print("fioundddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
break
if is_found == True:
break
if is_found == False:
sdfhsdhfkjhd
if is_evaluation_only == True:
is_found = False
for i in range(0, self.x.shape[1]):
xt = np.copy(self.x[self.evaluate_classes,i,:,:,:])
xt[27,99,0] = 0
xt[27,103,0] = 0
xt[27,107,0] = 0
et = np.copy(self.evaluation[self.evaluate_classes,0,:,:,:])
et[27,99,0] = 0
et[27,103,0] = 0
et[27,107,0] = 0
result = np.subtract( xt, et)
if (result > 1.0).sum() >= 1 or (result < -1.0).sum() >= 1:
continue
#print ('the difference i ' + str(i))
#print (result)
if (result > 0.0).sum() == 0 and (result < 0.0).sum() == 0:
is_found = True
self.evaluation[:,:,27,99,0] = self.x[self.evaluate_classes,i,27,99,0]
self.evaluation[:,:,27,103,0] = self.x[self.evaluate_classes,i,27,103,0]
self.evaluation[:,:,27,107,0] = self.x[self.evaluate_classes,i,27,107,0]
print("fioundddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
break
if is_found == False:
sdfhsdhfkjhd
"""
#TODO tmp. compare
if is_evaluation_only == True and is_compare == True:
is_found = False
for c in range(0, self.x.shape[0]):
for i in range(0, self.x.shape[1]):
xt = np.copy(self.x[c,i,:,:,:])
et = np.copy(self.evaluation[self.evaluate_classes,0,:,:,:])
result = np.subtract( xt, et)
if (result > 1.0).sum() >= 1 or (result < -1.0).sum() >= 1:
continue
#print ('the difference i ' + str(i))
#print (result)
if (result > 0.0).sum() == 0 and (result < 0.0).sum() == 0:
is_found = True
print("c " + str(c) + " i " + str(i))
print("fioundddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
break
if is_found == False:
print("loading prepared x_to_be_predicted file for testing")
self.x_to_be_predicted = array( json.load( open( base_classes_file+"_x_to_be_predicted.json" ) ) )
self.x_to_be_predicted[:,:,27,99,0] = 0
self.x_to_be_predicted[:,:,27,103,0] = 0
self.x_to_be_predicted[:,:,27,107,0] = 0
for c in range(0, self.x_to_be_predicted.shape[0]):
for i in range(0, self.x_to_be_predicted.shape[1]):
xt = np.copy(self.x_to_be_predicted[c,i,:,:,:])
et = np.copy(self.evaluation[self.evaluate_classes,0,:,:,:])
result = np.subtract( xt, et)
if (result > 1.0).sum() >= 1 or (result < -1.0).sum() >= 1:
continue
#print ('the difference i ' + str(i))
#print (result)
if (result > 0.0).sum() == 0 and (result < 0.0).sum() == 0:
is_found = True
print("c " + str(c) + " i " + str(i))
print("x_to_be_predicted fioundddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
break
if is_found == False:
print("not found")
sdfhsdhfkjhd
#
self.shuffle_classes = [] #not used #np.arange(self.x.shape[0])
self.is_apply_pca_first = is_apply_pca_first
#pca
if self.is_apply_pca_first == 1:
#data = self.x.reshape(self.x.shape[0]*self.x.shape[1], self.x.shape[2]*self.x.shape[3])
data = []
xcsize = self.x.shape[0]
xssize = self.x.shape[1]
for c in range(0, xcsize):
for s in range(0, xssize):
data.append( self.x[c, s, :, :, :].reshape( self.x.shape[2]*self.x.shape[3] ) )
if len(self.x_to_be_predicted) == 0:
print("loading prepared x_to_be_predicted file for pca")
self.x_to_be_predicted = array( json.load( open( base_classes_file+"_x_to_be_predicted.json" ) ) )
xtpcsize = self.x_to_be_predicted.shape[0]
xtpssize = self.x_to_be_predicted.shape[1]
for c in range(0, xtpcsize):
for s in range(0, xtpssize):
data.append( self.x_to_be_predicted[c, s, :, :, :].reshape( self.x.shape[2]*self.x.shape[3] ) )
if is_evaluation_only == True:
if is_loaded_evaluation_file == True:
xecsize = self.evaluation.shape[0]
xessize = self.evaluation.shape[1]
for c in range(0, xecsize):
for s in range(0, xessize):
data.append( self.evaluation[c, s, :, :, :].reshape( self.x.shape[2]*self.x.shape[3] ) )
else:
data.append( self.evaluation[0, 0, :, :, :].reshape( self.x.shape[2]*self.x.shape[3] ) )
data = np.array(data)
print(data.shape)
"""
##
#print("pca matlab")
#from matplotlib.mlab import PCA
#p = PCA(data)
#print( p.Wt )
#print( p.Wt.shape )
#
print( "pca custom from so https://stackoverflow.com/a/13224592" )
def PCA(data, dims_rescaled_data=2):
""""""
returns: data transformed in 2 dims/columns + regenerated original data
pass in: data as 2D NumPy array
""""""
import numpy as NP
from scipy import linalg as LA
m, n = data.shape
# mean center the data
data -= data.mean(axis=0)
# calculate the covariance matrix
R = NP.cov(data, rowvar=False)
# calculate eigenvectors & eigenvalues of the covariance matrix
# use 'eigh' rather than 'eig' since R is symmetric,
# the performance gain is substantial
evals, evecs = LA.eigh(R)
# sort eigenvalue in decreasing order
idx = NP.argsort(evals)[::-1]
evecs = evecs[:,idx]
# sort eigenvectors according to same index
evals = evals[idx]
# select the first n eigenvectors (n is desired dimension
# of rescaled data array, or dims_rescaled_data)
evecs = evecs[:, :dims_rescaled_data]
# carry out the transformation on the data using eigenvectors
# and return the re-scaled data, eigenvalues, and eigenvectors
return NP.dot(evecs.T, data.T).T, evals, evecs
def test_PCA(data, dims_rescaled_data=2):
'''
test by attempting to recover original data array from
the eigenvectors of its covariance matrix & comparing that
'recovered' array with the original data
'''
_ , _ , eigenvectors = PCA(data, dim_rescaled_data=2)
data_recovered = NP.dot(eigenvectors, m).T
data_recovered += data_recovered.mean(axis=0)
assert NP.allclose(data, data_recovered)
def plot_pca(data):
from matplotlib import pyplot as MPL
clr1 = '#2026B2'
fig = MPL.figure()
ax1 = fig.add_subplot(111)
data_resc, data_orig = PCA(data)
ax1.plot(data_resc[:, 0], data_resc[:, 1], '.', mfc=clr1, mec=clr1)
MPL.show()
#print( plot_pca(data) )
"""
from sklearn.decomposition import PCA
p = PCA(n_components = pca_components).fit_transform(data)
print( type(p) )
print( p )
print( p.shape )
ind = 0
new_dimension = int( math.sqrt( pca_components ) )
self.x = np.zeros( ( xcsize, xssize, new_dimension, new_dimension, 1 ) )
for c in range(0, xcsize):
for s in range(0, xssize):
self.x[c, s, :, :, :] = p[ind].reshape( new_dimension, new_dimension, 1 )
ind = ind + 1
if is_evaluation_only == False:
self.x_to_be_predicted = np.zeros( ( xtpcsize, xtpssize, new_dimension, new_dimension, 1 ) )
else:
self.x_to_be_predicted = []
for c in range(0, xtpcsize):
for s in range(0, xtpssize):
if is_evaluation_only == False:
self.x_to_be_predicted[c, s, :, :, :] = p[ind].reshape( new_dimension, new_dimension, 1 )
ind = ind + 1
if is_evaluation_only == True:
self.evaluation = np.zeros( ( self.evaluation.shape[0], self.evaluation.shape[1], new_dimension, new_dimension, 1 ) )
if is_loaded_evaluation_file == True:
for c in range(0, xecsize):
for s in range(0, xessize):
self.evaluation[c, s, :, :, :] = p[ind].reshape( new_dimension, new_dimension, 1 )
ind = ind + 1
else:
self.evaluation[:, :, :, :, :] = p[ len(p) - 1 ].reshape( new_dimension, new_dimension, 1 )
data = []
p = []
#visualize
if is_visualize_data == True:
raise Exception("Not implemented yet")
"""
#TODO temp
self.x = self.x[:30]
self.evaluation = self.evaluation[0:30]
shuffle_classes = np.arange(self.x.shape[0])
np.random.shuffle(shuffle_classes)
print("shuffle_classes")
print(shuffle_classes)
self.shuffle_classes = shuffle_classes
self.x = self.x[shuffle_classes]
self.evaluation = self.evaluation[shuffle_classes]
"""
self.data_pack_shape_2 = None
self.data_pack_shape_3 = None
"""
Constructs an N-Shot omniglot Dataset
:param batch_size: Experiment batch_size
:param classes_per_set: Integer indicating the number of classes per set
:param samples_per_class: Integer indicating samples per class
e.g. For a 20-way, 1-shot learning task, use classes_per_set=20 and samples_per_class=1
For a 5-way, 10-shot learning task, use classes_per_set=5 and samples_per_class=10
"""
#shuffle_classes = np.arange(self.x.shape[0])
#np.random.shuffle(shuffle_classes)
#self.x = self.x[shuffle_classes]
self.cache_sample = 0
self.cache_sample_prediction = 0
self.is_rotate = False
if is_use_sample_data:
self.is_rotate = True
self.cache_sample = 1000
self.cache_sample_prediction = 10
self.x_train, self.x_test, self.x_val = self.x[:1200], self.x[1200:1500], self.x[1500:]
else:
self.is_rotate = False
self.cache_sample = 300
self.cache_sample_prediction = cache_samples_for_evaluation
if is_evaluation_only == False:
#self.x_train, self.x_test, self.x_val = self.x[:900], self.x[900:1200], self.x[1200:]
#self.x_train, self.x_test, self.x_val = self.x[:30], self.x[30:43], self.x[43:]
#self.x_train, self.x_test, self.x_val = self.x[:200], self.x[200:270], self.x[270:]
if is_run_validation_batch:
self.x_train, self.x_test, self.x_val = self.x[:4], None, self.x[4:] #, self.x[6:]
else:
self.x_train, self.x_test, self.x_val = self.x[:], None, None #, self.x[6:]
if is_switch_dim:
self.x_train = self.x_train.reshape( ( self.x_train.shape[0], self.x_train.shape[1], self.x_train.shape[3], self.x_train.shape[2], self.x_train.shape[4] ) )
if self.x_test is not None:
self.x_test = self.x_test.reshape( ( self.x_test.shape[0], self.x_test.shape[1], self.x_test.shape[3], self.x_test.shape[2], self.x_train.shape[4] ) )
if self.x_val is not None:
self.x_val = self.x_val.reshape( ( self.x_val.shape[0], self.x_val.shape[1], self.x_val.shape[3], self.x_val.shape[2], self.x_train.shape[4] ) )
else:
self.x_train = self.x[:]
if is_switch_dim:
self.x_train = self.x_train.reshape( ( self.x_train.shape[0], self.x_train.shape[1], self.x_train.shape[3], self.x_train.shape[2], self.x_train.shape[4] ) )
self.evaluation = self.evaluation.reshape( ( self.evaluation.shape[0], self.evaluation.shape[1], self.evaluation.shape[3], self.evaluation.shape[2], self.evaluation.shape[4] ) )
#print( self.x_train[0][0] )
self.normalization()
#print( self.x_train[0][0] )
self.batch_size = batch_size
self.n_classes = self.x.shape[0]
self.classes_per_set = classes_per_set
self.samples_per_class = samples_per_class
if not is_load_file_data_only:
if is_evaluation_only == False:
self.indexes = {"train": 0, "val": 0, "test": 0, "x_to_be_predicted": 0}
self.datasets = {"train": self.x_train, "val": self.x_val, "test": self.x_test, "x_to_be_predicted": self.x_to_be_predicted} #original data cached
self.datasets_cache = {"train": self.load_data_cache(self.datasets["train"], ""), #current epoch data cached
"val": None if self.x_val == None else self.load_data_cache(self.datasets["val"], ""),
"test": None if self.x_test == None else self.load_data_cache(self.datasets["test"], ""),
"x_to_be_predicted": None if not self.is_run_time_predictions else self.load_data_cache(self.datasets["x_to_be_predicted"], "x_to_be_predicted")}
else:
self.indexes = {"evaluation": 0}
self.datasets = {"evaluation": self.x_train} #original data cached
self.datasets_cache = {"evaluation": self.load_data_cache_for_evaluation(self.datasets["evaluation"], "evaluation", self.evaluation, True)}
def get_data_x(self):
return self.x
def normalization(self):
"""
Normalizes our data to have a mean of 0 and a std of 1
"""
return
self.mean = np.mean(self.x_train)
self.std = np.std(self.x_train)
self.max = np.max(self.x_train)
self.min = np.min(self.x_train)
print("train_shape", self.x_train.shape, "test_shape", self.x_test.shape, "val_shape", self.x_val.shape, "x_to_be_predicted", self.x_to_be_predicted.shape)
print("before_normalization", "mean", self.mean, "max", self.max, "min", self.min, "std", self.std)
#if required for your data, enable normalization by uncommenting the code below
"""
self.x_train = (self.x_train - self.mean) / self.std
self.x_val = (self.x_val - self.mean) / self.std
self.x_test = (self.x_test - self.mean) / self.std
self.x_to_be_predicted = (self.x_to_be_predicted - self.mean) / self.std
self.mean = np.mean(self.x_train)
self.std = np.std(self.x_train)
self.max = np.max(self.x_train)
self.min = np.min(self.x_train)
"""
print("after_normalization", "mean", self.mean, "max", self.max, "min", self.min, "std", self.std)
def load_data_cache(self, data_pack, data_pack_type):
"""
Collects batches of data for N-shot learning
:param data_pack: Data pack to use (any one of train, val, test)
:return: A list with [support_set_x, support_set_y, target_x, target_y] ready to be fed to our networks
"""
if self.is_debug:
print( "data_pack" )
print( data_pack_type )
print( data_pack.shape )
# If batch persistence is enabled and a cached batch is already available, skip building a new batch and simply return the cached one
if self.is_batch_persistancy:
if todo_is_batch_cache_vailable:
todo = True
raise Exception("Not implemented yet")
"""
print( data_pack.shape[0] )
print( data_pack.shape[2] )
print( data_pack.shape[3] )
"""
if self.data_pack_shape_2 == None:
self.data_pack_shape_2 = data_pack.shape[2]
if self.data_pack_shape_3 == None:
self.data_pack_shape_3 = data_pack.shape[3]
n_samples = self.samples_per_class * self.classes_per_set
data_cache = []
for sample in range(self.cache_sample):
"""
#TODO temp. profiling, comment it when not needed
import cProfile, pstats
import io as StringIO
print( "profiling start" )
pr = cProfile.Profile()
pr.enable()
"""
support_set_x = np.zeros((self.batch_size, n_samples, self.data_pack_shape_2, self.data_pack_shape_3, 1))
support_set_y = np.zeros((self.batch_size, n_samples))
target_x = np.zeros((self.batch_size, self.samples_per_class, self.data_pack_shape_2, self.data_pack_shape_3, 1), dtype=np.int)
target_y = np.zeros((self.batch_size, self.samples_per_class), dtype=np.int)
support_set_y_actuals = np.zeros((self.batch_size, n_samples), dtype=np.int)
target_y_actuals = np.zeros((self.batch_size, self.samples_per_class), dtype=np.int)
for i in range(self.batch_size):
pinds = np.random.permutation(n_samples)
classes = np.random.choice(data_pack.shape[0], self.classes_per_set, False if not data_pack_type == "x_to_be_predicted" else False) #False
# select 1-shot or 5-shot classes for test with repetition
x_hat_class = np.random.choice(classes, self.samples_per_class, True)
pinds_test = np.random.permutation(self.samples_per_class)
ind = 0
ind_test = 0
for j, cur_class in enumerate(classes): # each class
#print( "example_inds" )
if cur_class in x_hat_class:
# Count number of times this class is inside the meta-test
n_test_samples = np.sum(cur_class == x_hat_class)
#example_inds = np.random.choice(data_pack.shape[1], self.samples_per_class + n_test_samples, self.choice_replace)
example_inds = np.random.choice(self.tvt_records_fall_short_clss[cur_class], self.samples_per_class + n_test_samples, self.choice_replace)
#print( "example_inds here 1 " + str(n_test_samples) )
else:
#print( "example_inds here 2 ", cur_class, self.tvt_records_fall_short_clss[cur_class], self.samples_per_class )
#example_inds = np.random.choice(data_pack.shape[1], self.samples_per_class, False)
example_inds = np.random.choice(self.tvt_records_fall_short_clss[cur_class], self.samples_per_class, False)
#print( example_inds )
# meta-training
for eind in example_inds[:self.samples_per_class]:
support_set_x[i, pinds[ind], :, :, :] = data_pack[cur_class][eind]
support_set_y[i, pinds[ind]] = j
support_set_y_actuals[i, pinds[ind]] = (cur_class+1) * -1
ind = ind + 1
# meta-test
for eind in example_inds[self.samples_per_class:]:
"""
print( "eind" )
print( eind )
print( cur_class )
print( i )
print( ind_test )
print( pinds_test[ind_test] )
"""
target_x[i, pinds_test[ind_test], :, :, :] = data_pack[cur_class][eind]
target_y[i, pinds_test[ind_test]] = j
target_y_actuals[i, pinds_test[ind_test]] = (cur_class+1) * -1
ind_test = ind_test + 1
data_cache.append([support_set_x, support_set_y, target_x, target_y, support_set_y_actuals, target_y_actuals])
"""
#TODO temp. profiling, comment it when not needed
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print( s.getvalue() )
sdfkjhskdfhkshdf
"""
return data_cache
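# Hedged usage sketch (assumption, not in the original source): each cache entry is one
# meta-batch, so a consumer could unpack it as
#   support_x, support_y, target_x, target_y, support_y_act, target_y_act = data_cache[0]
# where support_x has shape (batch_size, classes_per_set * samples_per_class, H, W, 1)
# and target_x has shape (batch_size, samples_per_class, H, W, 1).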
def load_data_cache_for_evaluation(self, data_pack, data_pack_type, data_pack_evaluation, is_init_call = None):
"""
Collects batches of data for N-shot learning
:param data_pack: Data pack to use (any one of train, val, test)
:return: A list with [support_set_x, support_set_y, target_x, target_y] ready to be fed to our networks
"""
if self.is_debug:
print( "data_pack" )
print( data_pack_type )
print( data_pack.shape )
print( "data_pack_evaluation" )
print( data_pack_evaluation.shape )
#shouldn't be called later during evaluation to speed up runtime
if not is_init_call == True:
raise Exception("Unexpected load cache call")
"""
print( data_pack.shape[0] )
print( data_pack.shape[2] )
print( data_pack.shape[3] )
"""
if self.data_pack_shape_2 == None:
self.data_pack_shape_2 = data_pack.shape[2]
if self.data_pack_shape_3 == None:
self.data_pack_shape_3 = data_pack.shape[3]
#TODO temp. eval with train data
is_eval_with_train_data = self.is_eval_with_train_data
list_evaluated_clss = []
n_samples = self.samples_per_class * self.classes_per_set
data_cache = []
for sample in range(0, self.cache_sample_prediction):
"""
#TODO temp. profiling, comment it when not needed
import cProfile, pstats
import io as StringIO
print( "profiling start" )
pr = cProfile.Profile()
pr.enable()
"""
self.evaluate_classes = math.floor(sample / 10)
support_set_x = np.zeros((self.batch_size, n_samples, self.data_pack_shape_2, self.data_pack_shape_3, 1))
support_set_y = np.zeros((self.batch_size, n_samples), dtype=np.int)#)
target_x = np.zeros((self.batch_size, self.samples_per_class, self.data_pack_shape_2, self.data_pack_shape_3, 1))#, dtype=np.int)
target_y = np.zeros((self.batch_size, self.samples_per_class), dtype=np.int)
target_y_actuals = np.zeros((self.batch_size, self.samples_per_class), dtype=np.int)
for i in range(self.batch_size):
pinds = np.random.permutation(n_samples)
#classes = np.random.choice(data_pack.shape[0], self.classes_per_set, False if not data_pack_type == "x_to_be_predicted" else False) #False
#classes = np.random.choice( self.prediction_classes, self.classes_per_set, False if not data_pack_type == "x_to_be_predicted" else False)
if not self.is_evaluation_res_in_obj:
classes = np.random.choice( 30, self.classes_per_set, False if not data_pack_type == "x_to_be_predicted" else False)
else:
classes = np.random.choice( data_pack.shape[0], self.classes_per_set, False if not data_pack_type == "x_to_be_predicted" else False)
# select 1-shot or 5-shot classes for test with repetition
if not self.is_evaluation_res_in_obj:
x_hat_class = np.random.choice(classes, self.samples_per_class, True)
else:
#find least evaluated
x_hat_class = []
for jtmp, tmp_class in enumerate(classes):
if not tmp_class in list_evaluated_clss:
list_evaluated_clss.append( tmp_class )
x_hat_class = np.array( [ tmp_class ] )
break
if len(x_hat_class) == 0:
for jtmp, tmp_class in enumerate(classes):
if list_evaluated_clss.count(tmp_class) == 1:
list_evaluated_clss.append( tmp_class )
x_hat_class = np.array( [ tmp_class ] )
break
if len(x_hat_class) == 0:
x_hat_class = np.random.choice(classes, self.samples_per_class, True)
self.evaluate_classes = x_hat_class[0]
pinds_test = np.random.permutation(self.samples_per_class)
ind = 0
ind_test = 0
for j, cur_class in enumerate(classes): # each class
example_inds_test = []
#print( "example_inds j " + str(j) )
if cur_class in x_hat_class:
# Count number of times this class is inside the meta-test
n_test_samples = np.sum(cur_class == x_hat_class)
if is_eval_with_train_data == True or not cur_class == self.evaluate_classes:
if not cur_class == self.evaluate_classes:
#example_inds = np.random.choice(data_pack.shape[1], self.samples_per_class+n_test_samples, self.choice_replace)
example_inds = np.random.choice(self.tvt_records_fall_short_clss[cur_class], self.samples_per_class+n_test_samples, self.choice_replace)
else:
#print( "example_inds_test here 1 in train mode" )
#example_inds = np.random.choice(data_pack.shape[1], self.samples_per_class + (n_test_samples - 1), self.choice_replace)
example_inds = np.random.choice(self.tvt_records_fall_short_clss[cur_class], self.samples_per_class + (n_test_samples - 1), self.choice_replace)
example_inds_test = np.array( [0] ) #np.random.choice(self.evaluate_classes, self.evaluate_classes, False)
else:
#print( "example_inds_test here 1 " )
#example_inds = np.random.choice(data_pack.shape[1], self.samples_per_class + (n_test_samples - 1), self.choice_replace)
example_inds = np.random.choice(self.tvt_records_fall_short_clss[cur_class], self.samples_per_class + (n_test_samples - 1), self.choice_replace)
example_inds_test = np.array( [0] ) #np.random.choice(self.evaluate_classes, self.evaluate_classes, False)
#print( "example_inds here 1 " + str(n_test_samples) )
else:
#print( "example_inds here 2 cur_class ", cur_class, " tvt_records_fall_short_clss ", self.tvt_records_fall_short_clss[cur_class], " samples_per_class ", self.samples_per_class )
#print( self.tvt_records_fall_short_clss )
#example_inds = np.random.choice(data_pack.shape[1], self.samples_per_class, False)
example_inds = np.random.choice(self.tvt_records_fall_short_clss[cur_class], self.samples_per_class, False)
#print( example_inds )
# meta-training
for eind in example_inds[:self.samples_per_class]:
support_set_x[i, pinds[ind], :, :, :] = data_pack[cur_class][eind]
support_set_y[i, pinds[ind]] = j
ind = ind + 1
# meta-test
if is_eval_with_train_data == True and not cur_class == self.evaluate_classes:
for eind in example_inds[self.samples_per_class:]:
"""
print( "eind" )
print( eind )
print( cur_class )
print( i )
print( ind_test )
print( pinds_test[ind_test] )
"""
target_x[i, pinds_test[ind_test], :, :, :] = data_pack[cur_class][eind]
target_y[i, pinds_test[ind_test]] = j
ind_test = ind_test + 1
else:
for eind in example_inds[self.samples_per_class:]:
"""
print( "eind" )
print( eind )
print( cur_class )
print( i )
print( ind_test )
print( pinds_test[ind_test] )
"""
target_x[i, pinds_test[ind_test], :, :, :] = data_pack[cur_class][eind]
target_y[i, pinds_test[ind_test]] = j
ind_test = ind_test + 1
if len(example_inds_test) > 0:
for eind in example_inds_test[:]:
"""
print( "eind" )
print( eind )
print( cur_class )
print( i )
print( ind_test )
print( pinds_test[ind_test] )
"""
if is_eval_with_train_data == True:
target_x[i, pinds_test[ind_test], :, :, :] = data_pack[cur_class+self.negative_test_offset][eind]
else:
target_x[i, pinds_test[ind_test], :, :, :] = data_pack_evaluation[cur_class+self.negative_test_offset][eind]
target_y[i, pinds_test[ind_test]] = j
target_y_actuals[i, pinds_test[ind_test]] = (cur_class+1+self.negative_test_offset) * -1
ind_test = ind_test + 1
data_cache.append([support_set_x, support_set_y, target_x, target_y, target_y_actuals])
"""
#TODO temp. profiling, comment it when not needed
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print( s.getvalue() )
sdfkjhskdfhkshdf
"""
return data_cache
def __get_batch(self, dataset_name):
"""
Gets next batch from the dataset with name.
:param dataset_name: The name of the dataset (one of "train", "val", "test")
:return:
"""
if self.indexes[dataset_name] >= len(self.datasets_cache[dataset_name]):
self.indexes[dataset_name] = 0
self.datasets_cache[dataset_name] = self.load_data_cache(self.datasets[dataset_name], dataset_name)
next_batch = self.datasets_cache[dataset_name][self.indexes[dataset_name]]
self.indexes[dataset_name] += 1
x_support_set, y_support_set, x_target, y_target = next_batch
return x_support_set, y_support_set, x_target, y_target
def get_batch(self,str_type, rotate_flag = False):
"""
Get next batch
:return: Next batch
"""
x_support_set, y_support_set, x_target, y_target = self.__get_batch(str_type)
if rotate_flag:
k = int(np.random.uniform(low=0, high=4))
# Iterate over the sequence. Extract batches.
for i in np.arange(x_support_set.shape[0]):
x_support_set[i,:,:,:,:] = self.__rotate_batch(x_support_set[i,:,:,:,:],k)
# Rotate all the batch of the target images
for i in np.arange(x_target.shape[0]):
x_target[i,:,:,:,:] = self.__rotate_batch(x_target[i,:,:,:,:], k)
return x_support_set, y_support_set, x_target, y_target
def get_batch_custom(self,str_type, cls, rotate_flag = False):
"""
Get next batch
:return: Next batch
"""
x_support_set, y_support_set, x_target, y_target = self.__get_batch(str_type)
if rotate_flag:
k = int(np.random.uniform(low=0, high=4))
# Iterate over the sequence. Extract batches.
for i in np.arange(x_support_set.shape[0]):
x_support_set[i,:,:,:,:] = self.__rotate_batch(x_support_set[i,:,:,:,:],k)
# Rotate all the batch of the target images
for i in np.arange(x_target.shape[0]):
x_target[i,:,:,:,:] = self.__rotate_batch(x_target[i,:,:,:,:], k)
"""
print( "get_batch_custom" )
print( x_support_set.shape )
print( y_support_set.shape )
print( x_target.shape )
print( y_target.shape )
x_support_set_tmp, y_support_set_tmp, x_target_tmp, y_target_tmp = x_support_set, y_support_set, x_target, y_target
for i in np.arange(8):
x_support_set_tmp[i,:,:,:,:], y_support_set_tmp[i,:], x_target_tmp[i,:,:,:,:], y_target_tmp[i,:] = x_support_set[self.x_to_be_predicted_cls_indexes[cls]:self.x_to_be_predicted_cls_indexes[cls]+1,:,:,:,:], y_support_set[self.x_to_be_predicted_cls_indexes[cls]:self.x_to_be_predicted_cls_indexes[cls]+1,:], x_target[self.x_to_be_predicted_cls_indexes[cls]:self.x_to_be_predicted_cls_indexes[cls]+1,:,:,:,:], y_target[self.x_to_be_predicted_cls_indexes[cls]:self.x_to_be_predicted_cls_indexes[cls]+1,:]
print( x_support_set_tmp.shape )
print( y_support_set_tmp )
print( x_target_tmp.shape )
print( y_target_tmp )
return x_support_set_tmp, y_support_set_tmp, x_target_tmp, y_target_tmp
"""
for i in np.arange( len(y_support_set) ):
for j in np.arange( len(y_support_set[i]) ):
if y_support_set[i][j] >= self.total_base_classes and y_support_set[i][j] < (self.total_base_classes*2):
y_support_set[i][j] = self.total_base_classes + ( (y_support_set[i][j] - self.total_base_classes) * 30 )
if y_support_set[i][j] >= (self.total_base_classes*2):
y_support_set[i][j] = (self.total_base_classes+1) + ( (y_support_set[i][j] - (self.total_base_classes+1)) * 30 )
for i in np.arange( len(y_target) ):
for j in np.arange( len(y_target[i]) ):
if y_target[i][j] >= self.total_base_classes and y_target[i][j] < (self.total_base_classes*2):
y_target[i][j] = self.total_base_classes + ( (y_target[i][j] - self.total_base_classes) * 30 )
if y_target[i][j] >= (self.total_base_classes*2):
y_target[i][j] = (self.total_base_classes+1) + ( (y_target[i][j] - (self.total_base_classes+1)) * 30 )
return x_support_set, y_support_set, x_target, y_target
def __get_batch_training(self, dataset_name):
"""
Gets next batch from the dataset with name.
:param dataset_name: The name of the dataset (one of "train", "val", "test")
:return:
"""
if self.indexes[dataset_name] >= len(self.datasets_cache[dataset_name]):
self.indexes[dataset_name] = 0
self.datasets_cache[dataset_name] = self.load_data_cache(self.datasets[dataset_name], dataset_name)
next_batch = self.datasets_cache[dataset_name][self.indexes[dataset_name]]
self.indexes[dataset_name] += 1
#x_support_set, y_support_set, x_target, y_target = next_batch
#return x_support_set, y_support_set, x_target, y_target
return next_batch
def get_batch_training(self, str_type, rotate_flag = False):
"""
Get next batch
:return: Next batch
"""
x_support_set, y_support_set, x_target, y_target, support_set_y_actuals, target_y_actuals = self.__get_batch_training(str_type)
if self.is_rotate and rotate_flag:
k = int(np.random.uniform(low=0, high=4))
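# Hedged completion (assumption, not in the original source): mirrors get_batch,
# rotating the support and target images by the same k quarter-turns before returning.
for i in np.arange(x_support_set.shape[0]):
    x_support_set[i, :, :, :, :] = self.__rotate_batch(x_support_set[i, :, :, :, :], k)
for i in np.arange(x_target.shape[0]):
    x_target[i, :, :, :, :] = self.__rotate_batch(x_target[i, :, :, :, :], k)
return x_support_set, y_support_set, x_target, y_target, support_set_y_actuals, target_y_actuals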
import os
import numpy as np
# make sure you have $CARLA_ROOT/PythonClient in your PYTHONPATH
from carla.driving_benchmark.experiment_suites import CoRL2017
from rl_coach.logger import screen
from rl_coach.agents.cil_agent import CILAgentParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import RegressionHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.architectures.layers import Conv2d, Dense, BatchnormActivationDropout
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.carla_environment import CarlaEnvironmentParameters
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.filters.filter import InputFilter
from rl_coach.filters.observation.observation_crop_filter import ObservationCropFilter
from rl_coach.filters.observation.observation_reduction_by_sub_parts_name_filter import \
ObservationReductionBySubPartsNameFilter
from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter
from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.schedules import ConstantSchedule
from rl_coach.spaces import ImageObservationSpace
from rl_coach.utilities.carla_dataset_to_replay_buffer import create_dataset
from rl_coach.core_types import PickledReplayBuffer
####################
# Graph Scheduling #
####################
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.steps_between_evaluation_periods = TrainingSteps(500)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(0)
################
# Agent Params #
################
agent_params = CILAgentParameters()
# forward camera and measurements input
agent_params.network_wrappers['main'].input_embedders_parameters = {
'CameraRGB': InputEmbedderParameters(
scheme=[
Conv2d(32, 5, 2),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(32, 3, 1),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(64, 3, 2),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(64, 3, 1),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(128, 3, 2),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(128, 3, 1),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(256, 3, 1),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Conv2d(256, 3, 1),
BatchnormActivationDropout(batchnorm=True, activation_function='tanh'),
Dense(512),
BatchnormActivationDropout(activation_function='tanh', dropout_rate=0.3),
Dense(512),
BatchnormActivationDropout(activation_function='tanh', dropout_rate=0.3)
],
activation_function='none' # we define the activation function for each layer explicitly
),
'measurements': InputEmbedderParameters(
scheme=[
Dense(128),
BatchnormActivationDropout(activation_function='tanh', dropout_rate=0.5),
Dense(128),
BatchnormActivationDropout(activation_function='tanh', dropout_rate=0.5)
],
activation_function='none' # we define the activation function for each layer explicitly
)
}
# simple fc middleware
agent_params.network_wrappers['main'].middleware_parameters = \
FCMiddlewareParameters(
scheme=[
Dense(512),
BatchnormActivationDropout(activation_function='tanh', dropout_rate=0.5)
],
activation_function='none'
)
# output branches
agent_params.network_wrappers['main'].heads_parameters = [
RegressionHeadParameters(
scheme=[
Dense(256),
BatchnormActivationDropout(activation_function='tanh', dropout_rate=0.5),
Dense(256),
BatchnormActivationDropout(activation_function='tanh')
],
num_output_head_copies=4 # follow lane, left, right, straight
)
]
# TODO: there should be another head predicting the speed which is connected directly to the forward camera embedding
agent_params.network_wrappers['main'].batch_size = 120
agent_params.network_wrappers['main'].learning_rate = 0.0002
# crop and rescale the image + use only the forward speed measurement
agent_params.input_filter = InputFilter()
agent_params.input_filter.add_observation_filter('CameraRGB', 'cropping',
ObservationCropFilter(crop_low=np.array([115, 0, 0]),
crop_high=np.array([510, -1, -1])))
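# Illustrative sketch (not part of the preset): assuming crop_low/crop_high are
# (row, col, channel) bounds and -1 means "to the end", the cropping filter above is
# roughly equivalent to slicing the raw frame as
#   cropped = frame[115:510, :, :]
# i.e. it keeps the road-centric band of rows and the full width and all channels.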
import tensorflow as tf
import numpy as np
import random
from collections import deque
from unityagents.brain import BrainParameters
from unitytrainers.ppo.rl_teacher.segment_sampling import sample_segment_from_path
from rl_teacher.nn import FullyConnectedMLP
from rl_teacher.utils import corrcoef
from keras import backend as K
class ComparisonRewardPredictor():
"""Predictor that trains a model to predict how much reward is contained in a trajectory segment"""
def __init__(self, brain:BrainParameters , summary_writer, comparison_collector, agent_logger, label_schedule,clip_length):
self.summary_writer = summary_writer
self.agent_logger = agent_logger
self.comparison_collector = comparison_collector
self.label_schedule = label_schedule
# Set up some bookkeeping
self.recent_segments = deque(maxlen=200) # Keep a queue of recently seen segments to pull new comparisons from
#self._frames_per_segment = clip_length * env.fps
self._frames_per_segment = clip_length * 30
self._steps_since_last_training = 0
self._n_timesteps_per_predictor_training = 1e2 # How often should we train our predictor?
self._elapsed_predictor_training_iters = 0
# Build and initialize our predictor model
config = tf.ConfigProto(
device_count={'GPU': 0}
)
self.sess = tf.InteractiveSession(config=config)
self.obs_shape =tuple([1,brain.vector_observation_space_size])
#self.discrete_action_space = not hasattr(env.action_space, "shape")
self.act_shape = tuple([brain.vector_action_space_size])
self.graph = self._build_model()
self.sess.run(tf.global_variables_initializer())
def _predict_rewards(self, obs_segments, act_segments, network):
"""
:param obs_segments: tensor with shape = (batch_size, segment_length) + obs_shape
:param act_segments: tensor with shape = (batch_size, segment_length) + act_shape
:param network: neural net with .run() that maps obs and act tensors into a (scalar) value tensor
:return: tensor with shape = (batch_size, segment_length)
"""
batchsize = tf.shape(obs_segments)[0]
segment_length = tf.shape(obs_segments)[1]
# Temporarily chop up segments into individual observations and actions
obs = tf.reshape(obs_segments, (-1,) + self.obs_shape)
acts = tf.reshape(act_segments, (-1,) + self.act_shape)
# Run them through our neural network
rewards = network.run(obs, acts)
# Group the rewards back into their segments
return tf.reshape(rewards, (batchsize, segment_length))
def _build_model(self):
"""
Our model takes in path segments with states and actions, and generates Q values.
These Q values serve as predictions of the true reward.
We can compare two segments and sum the Q values to get a prediction of a label
of which segment is better. We then learn the weights for our model by comparing
these labels with an authority (either a human or synthetic labeler).
"""
# Set up observation placeholders
self.segment_obs_placeholder = tf.placeholder(
dtype=tf.float32,shape=self.obs_shape, name="obs_placeholder")
self.segment_alt_obs_placeholder = tf.placeholder(
dtype=tf.float32, shape=self.obs_shape, name="alt_obs_placeholder")
self.segment_act_placeholder = tf.placeholder(
dtype=tf.float32, shape=self.act_shape, name="act_placeholder")
self.segment_alt_act_placeholder = tf.placeholder(
dtype=tf.float32, shape=self.act_shape, name="alt_act_placeholder")
# A vanilla multi-layer perceptron maps a (state, action) pair to a reward (Q-value)
mlp = FullyConnectedMLP(self.obs_shape, self.act_shape)
self.q_value = self._predict_rewards(self.segment_obs_placeholder, self.segment_act_placeholder, mlp)
alt_q_value = self._predict_rewards(self.segment_alt_obs_placeholder, self.segment_alt_act_placeholder, mlp)
# We use trajectory segments rather than individual (state, action) pairs because
# video clips of segments are easier for humans to evaluate
segment_reward_pred_left = tf.reduce_sum(self.q_value, axis=1)
segment_reward_pred_right = tf.reduce_sum(alt_q_value, axis=1)
reward_logits = tf.stack([segment_reward_pred_left, segment_reward_pred_right], axis=1) # (batch_size, 2)
self.labels = tf.placeholder(dtype=tf.int32, shape=(None,), name="comparison_labels")
# delta = 1e-5
# clipped_comparison_labels = tf.clip_by_value(self.comparison_labels, delta, 1.0-delta)
data_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=reward_logits, labels=self.labels)
self.loss_op = tf.reduce_mean(data_loss)
global_step = tf.Variable(0, name='global_step', trainable=False)
self.train_op = tf.train.AdamOptimizer().minimize(self.loss_op, global_step=global_step)
return tf.get_default_graph()
def predict_reward(self, path):
"""Predict the reward for each step in a given path"""
with self.graph.as_default():
q_value = self.sess.run(self.q_value, feed_dict={
self.segment_obs_placeholder: np.asarray([path["obs"]]),
self.segment_act_placeholder: np.asarray([path["actions"]]),
K.learning_phase(): False
})
return q_value[0]
def path_callback(self, path):
path_length = len(path["obs"])
self._steps_since_last_training += path_length
self.agent_logger.log_episode(path)
# We may be in a new part of the environment, so we take new segments to build comparisons from
segment = sample_segment_from_path(path, int(self._frames_per_segment))
if segment:
self.recent_segments.append(segment)
# If we need more comparisons, then we build them from our recent segments
if len(self.comparison_collector) < int(self.label_schedule.n_desired_labels):
self.comparison_collector.add_segment_pair(
random.choice(self.recent_segments),
random.choice(self.recent_segments))
# Train our predictor every X steps
if self._steps_since_last_training >= int(self._n_timesteps_per_predictor_training):
self.train_predictor()
self._steps_since_last_training -= self._steps_since_last_training
def train_predictor(self):
self.comparison_collector.label_unlabeled_comparisons()
minibatch_size = min(64, len(self.comparison_collector.labeled_decisive_comparisons))
labeled_comparisons = random.sample(self.comparison_collector.labeled_decisive_comparisons, minibatch_size)
left_obs = np.asarray([comp['left']['obs'] for comp in labeled_comparisons])
left_acts = np.asarray([comp['left']['actions'] for comp in labeled_comparisons])
right_obs = np.asarray([comp['right']['obs'] for comp in labeled_comparisons])
right_acts = np.asarray([comp['right']['actions'] for comp in labeled_comparisons])
labels = np.asarray([comp['label'] for comp in labeled_comparisons])
with self.graph.as_default():
_, loss = self.sess.run([self.train_op, self.loss_op], feed_dict={
self.segment_obs_placeholder: left_obs,
self.segment_act_placeholder: left_acts,
self.segment_alt_obs_placeholder: right_obs,
self.segment_alt_act_placeholder: right_acts,
self.labels: labels,
K.learning_phase(): True
})
self._elapsed_predictor_training_iters += 1
self._write_training_summaries(loss)
def _write_training_summaries(self, loss):
self.agent_logger.log_simple("predictor/loss", loss)
# Calculate correlation between true and predicted reward by running validation on recent episodes
recent_paths = self.agent_logger.get_recent_paths_with_padding()
if len(recent_paths) > 1 and self.agent_logger.summary_step % 10 == 0: # Run validation every 10 iters
validation_obs = np.asarray([path["obs"] for path in recent_paths])
validation_acts = np.asarray([path["actions"] for path in recent_paths])
q_value = self.sess.run(self.q_value, feed_dict={
self.segment_obs_placeholder: validation_obs,
self.segment_act_placeholder: validation_acts,
K.learning_phase(): False
})
ep_reward_pred = np.sum(q_value, axis=1)
reward_true = np.asarray([path['original_rewards'] for path in recent_paths])
ep_reward_true = np.sum(reward_true, axis=1)
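# Illustrative sketch (not part of the original source): how two segments' summed
# predicted rewards become a preference probability, mirroring the softmax
# cross-entropy comparison used in _build_model above. All values are made up.
import numpy as np
left_rewards = np.array([0.2, 0.1, 0.4])    # per-step predicted rewards, left segment
right_rewards = np.array([0.3, 0.3, 0.3])   # per-step predicted rewards, right segment
logits = np.array([left_rewards.sum(), right_rewards.sum()])
p_left_preferred = np.exp(logits[0]) / np.exp(logits).sum()
print(p_left_preferred)                     # probability the labeler prefers the left clip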
import pytest
import mock
import os
import numpy as np
import awkward as awk
from zinv.modules.readers import WeightMetTrigger
class DummyColl(object):
pass
class DummyEvent(object):
def __init__(self):
self.iblock = 0
self.source = ''
self.nsig = 0.
self.attribute_variation_sources = []
self.cache = {}
def register_function(self, event, name, function):
self.__dict__[name] = function
@pytest.fixture()
def event():
return DummyEvent()
@pytest.fixture()
def module():
return WeightMetTrigger(
correction_files = {
0: "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/mettrigger/met_trigger_correction_0mu.txt",
1: "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/mettrigger/met_trigger_correction_1mu.txt",
2: "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/mettrigger/met_trigger_correction_2mu.txt",
},
)
def test_weightmettrigger_init(module):
assert module.correction_files == {
0: "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/mettrigger/met_trigger_correction_0mu.txt",
1: "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/mettrigger/met_trigger_correction_1mu.txt",
2: "http://www.hep.ph.ic.ac.uk/~sdb15/Analysis/ZinvWidth/data/mettrigger/met_trigger_correction_2mu.txt",
}
assert module.cats == [0, 1, 2]
assert all([hasattr(module, attr) for attr in [
"xcents", "corr", "statup", "statdown", "systup", "systdown",
]])
@pytest.mark.parametrize(
"inputs,outputs", (
[{
"met": [-np.inf, -50., 0., 50., 100., 200., 200., 300., 1000., 2000., np.inf, np.nan],
"mucounts": [0, 0, 0, 0, 0, 1, 2, 3, np.nan, 0, 0, 0],
"nsig": 0.,
"source": "",
}, {
"eff": [0., 0., 0., 0.00085, 0.11995, 0.954119033, 0.9128533589, 1., 1., 1., 1., np.nan],
}], [{
"met": [-np.inf, -50., 0., 50., 100., 200., 200., 300., 1000., 2000., np.inf, np.nan],
"mucounts": [0, 0, 0, 0, 0, 1, 2, 3, np.nan, 0, 0, 0],
"nsig": 1.,
"source": "metTrigStat",
}, {
"eff": [0., 0., 0., 0.000850255, 0.120748, 0.960702425, 0.9245834611, 1., 1., 1., 1., np.nan],
}], [{
"met": [-np.inf, -50., 0., 50., 100., 200., 200., 300., 1000., 2000., np.inf, np.nan],
"mucounts": [0, 0, 0, 0, 0, 1, 2, 3, np.nan, 0, 0, 0],
"nsig": -1.,
"source": "metTrigStat",
}, {
"eff": [0., 0., 0., 0.000849785, 0.119164, 0.9473925085, 0.9009406438, 1., 1., 1., 1., np.nan],
}], [{
"met": [-np.inf, -50., 0., 50., 100., 200., 200., 300., 1000., 2000., np.inf, np.nan],
"mucounts": [0, 0, 0, 0, 0, 1, 2, 3, np.nan, 0, 0, 0],
"nsig": 1.,
"source": "metTrigSyst",
}, {
"eff": [0., 0., 0., 0.00085004, 0.1201, 0.9554547996, 0.915318063, 1., 1., 1., 1., np.nan],
}], [{
"met": [-np.inf, -50., 0., 50., 100., 200., 200., 300., 1000., 2000., np.inf, np.nan],
"mucounts": [0, 0, 0, 0, 0, 1, 2, 3, np.nan, 0, 0, 0],
"nsig": -1.,
"source": "metTrigSyst",
}, {
"eff": [0., 0., 0., 0.00084996, 0.1198, 0.95278326635380, 0.91038865487585, 1., 1., 1., 1., np.nan],
}],
)
)
def test_weightmettrigger_begin(module, event, inputs, outputs):
event.nsig = inputs["nsig"]
event.source = inputs["source"]
module.begin(event)
event.METnoX_pt = mock.Mock(
side_effect=lambda ev, source, nsig: np.array(inputs["met"], dtype=np.float32),
)
def mupt(ev, source, nsig, attr):
assert attr == "pt"
musele = DummyColl()
musele.counts = np.array(inputs["mucounts"], dtype=np.float32)
return musele
event.MuonSelection = mock.Mock(side_effect=mupt)
eff = event.WeightMETTrig(event, event.source, event.nsig)
oeff = np.array(outputs["eff"], dtype=np.float32)
print(eff)
print(oeff)
assert np.allclose(eff, oeff, rtol=1e-5, equal_nan=True)
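# Illustrative aside (not part of the test module): the equal_nan=True flag is what
# lets the NaN entries in the expected efficiencies compare as equal.
import numpy as np
a = np.array([1.0, np.nan])
print(np.allclose(a, a))                  # False: NaN != NaN by default
print(np.allclose(a, a, equal_nan=True))  # True: NaNs at matching positions are accepted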
from __future__ import division
import os
import numpy as np
from ..utils.importing import import_file
class ImageClassifier(object):
"""
ImageClassifier workflow.
This workflow is used to train image classification tasks, typically when
the dataset cannot be stored in memory.
Submissions need to contain two files, which by default are named:
image_preprocessor.py and batch_classifier.py (they can be modified
by changing `workflow_element_names`).
image_preprocessor.py needs a `transform` function, which
is used for preprocessing the images. It takes an image as input
and it returns an image as an output. Optionally, image_preprocessor.py
can also have a function `transform_test`, which is used only to preprocess
images at test time. Otherwise, if `transform_test` does not exist,
`transform` is used at train and test time.
batch_classifier.py needs a `BatchClassifier` class, which implements
`fit` and `predict_proba`, where `fit` takes as input an instance
of `BatchGeneratorBuilder`.
Parameters
==========
test_batch_size : int
batch size used for testing.
chunk_size : int
size of the chunk used to load data from disk into memory.
(see at the top of the file what a chunk is and its difference
with the mini-batch size of neural nets).
n_jobs : int
the number of jobs used to load images from disk to memory as `chunks`.
n_classes : int
Total number of classes.
"""
def __init__(self, test_batch_size, chunk_size, n_jobs, n_classes,
workflow_element_names=[
'image_preprocessor', 'batch_classifier']):
self.element_names = workflow_element_names
self.test_batch_size = test_batch_size
self.chunk_size = chunk_size
self.n_jobs = n_jobs
self.n_classes = n_classes
def train_submission(self, module_path, folder_X_array, y_array,
train_is=None):
"""Train a batch image classifier.
module_path : str
module where the submission is. the folder of the module
have to contain batch_classifier.py and image_preprocessor.py.
X_array : ArrayContainer vector of int
vector of image IDs to train on
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
y_array : vector of int
vector of image labels corresponding to X_train
train_is : vector of int
indices from X_array to train on
"""
folder, X_array = folder_X_array
if train_is is None:
train_is = slice(None, None, None)
image_preprocessor = import_file(module_path, self.element_names[0])
transform_img = image_preprocessor.transform
transform_test_img = getattr(image_preprocessor,
'transform_test',
transform_img)
batch_classifier = import_file(module_path, self.element_names[1])
clf = batch_classifier.BatchClassifier()
gen_builder = BatchGeneratorBuilder(
X_array[train_is], y_array[train_is],
transform_img, transform_test_img,
folder=folder,
chunk_size=self.chunk_size, n_classes=self.n_classes,
n_jobs=self.n_jobs)
clf.fit(gen_builder)
return transform_img, transform_test_img, clf
def test_submission(self, trained_model, folder_X_array):
"""Train a batch image classifier.
trained_model : tuple (function, Classifier)
tuple of a trained model returned by `train_submission`.
X_array : ArrayContainer of int
vector of image IDs to test on.
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
"""
folder, X_array = folder_X_array
transform_img, transform_test_img, clf = trained_model
it = _chunk_iterator(
X_array, folder=folder, chunk_size=self.chunk_size)
y_proba = []
for X in it:
for i in range(0, len(X), self.test_batch_size):
# 1) Preprocessing
X_batch = X[i: i + self.test_batch_size]
# X_batch = Parallel(n_jobs=self.n_jobs, backend='threading')(
# delayed(transform_img)(x) for x in X_batch)
X_batch = [transform_test_img(x) for x in X_batch]
# X is a list of numpy arrays at this point, convert it to a
# single numpy array.
try:
X_batch = [x[np.newaxis, :, :, :] for x in X_batch]
except IndexError:
# single channel
X_batch = [
x[np.newaxis, np.newaxis, :, :] for x in X_batch]
X_batch = np.concatenate(X_batch, axis=0)
# 2) Prediction
y_proba_batch = clf.predict_proba(X_batch)
y_proba.append(y_proba_batch)
y_proba = np.concatenate(y_proba, axis=0)
return y_proba
class BatchGeneratorBuilder(object):
"""A batch generator builder for generating images on the fly.
This class is a way to build training and
validation generators that yield each time a tuple (X, y) of mini-batches.
The generators are built in a way to fit into keras API of `fit_generator`
(see https://keras.io/models/model/).
An instance of this class is exposed to users `Classifier` through
the `fit` function : model fitting is called by using
"clf.fit(gen_builder)" where `gen_builder` is an instance
of this class : `BatchGeneratorBuilder`.
The fit function from `Classifier` should then use the instance
to build train and validation generators, using the method
`get_train_valid_generators`
Parameters
==========
X_array : ArrayContainer of int
vector of image IDs to train on
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
y_array : vector of int
vector of image labels corresponding to `X_array`
folder : str
folder where the images are
chunk_size : int
size of the chunk used to load data from disk into memory.
(see at the top of the file what a chunk is and its difference
with the mini-batch size of neural nets).
n_classes : int
Total number of classes. This is needed because the array
of labels, which is a vector of ints, is transformed into
a onehot representation.
n_jobs : int
the number of jobs used to load images from disk to memory as `chunks`.
"""
def __init__(self, X_array, y_array,
transform_img, transform_test_img,
folder, chunk_size, n_classes, n_jobs):
self.X_array = X_array
self.y_array = y_array
self.transform_img = transform_img
self.transform_test_img = transform_test_img
self.folder = folder
self.chunk_size = chunk_size
self.n_classes = n_classes
self.n_jobs = n_jobs
self.nb_examples = len(X_array)
def get_train_valid_generators(self, batch_size=256, valid_ratio=0.1):
"""Build train and valid generators for keras.
This method is used by the user defined `Classifier` to build train
and valid generators that will be used in keras `fit_generator`.
Parameters
==========
batch_size : int
size of mini-batches
valid_ratio : float between 0 and 1
ratio of validation data
Returns
=======
a 4-tuple (gen_train, gen_valid, nb_train, nb_valid) where:
- gen_train is a generator function for training data
- gen_valid is a generator function for valid data
- nb_train is the number of training examples
- nb_valid is the number of validation examples
The number of training and validation data are necessary
so that we can use the keras method `fit_generator`.
"""
nb_valid = int(valid_ratio * self.nb_examples)
nb_train = self.nb_examples - nb_valid
indices = np.arange(self.nb_examples)
train_indices = indices[0:nb_train]
valid_indices = indices[nb_train:]
gen_train = self._get_generator(
indices=train_indices, batch_size=batch_size)
gen_valid = self._get_generator(
indices=valid_indices, batch_size=batch_size)
return gen_train, gen_valid, nb_train, nb_valid
def _get_generator(self, indices=None, batch_size=256):
if indices is None:
indices = np.arange(self.nb_examples)
# Infinite loop, as required by keras `fit_generator`.
# However, as we provide the number of examples per epoch
# and the user specifies the total number of epochs, it will
# be able to end.
while True:
it = _chunk_iterator(
X_array=self.X_array[indices], folder=self.folder,
y_array=self.y_array[indices], chunk_size=self.chunk_size,
n_jobs=self.n_jobs)
for X, y in it:
# 1) Preprocessing of X and y
# X = Parallel(
# n_jobs=self.n_jobs, backend='threading')(delayed(
# self.transform_img)(x) for x in X)
X = np.array([self.transform_img(x) for x in X])
# X is a list of numpy arrays at this point, convert it to a
# single numpy array.
try:
X = [x[np.newaxis, :, :, :] for x in X]
except IndexError:
# single channel
X = [x[np.newaxis, np.newaxis, :, :] for x in X]
X = np.concatenate(X, axis=0)
X = np.array(X, dtype='float32')
# Convert y to onehot representation
y = _to_categorical(y, num_classes=self.n_classes)
# 2) Yielding mini-batches
for i in range(0, len(X), batch_size):
yield X[i:i + batch_size], y[i:i + batch_size]
def _chunk_iterator(X_array, folder, y_array=None, chunk_size=1024, n_jobs=8):
"""Generate chunks of images, optionally with their labels.
Parameters
==========
X_array : ArrayContainer of int
image ids to load
(it is named X_array to be coherent with the current API,
but as said here, it does not represent the data itself,
only image IDs).
y_array : vector of int
labels corresponding to each image from X_array
chunk_size : int
chunk size
folder : str
folder where the images are
n_jobs : int
number of jobs used to load images in parallel
Yields
======
if y_array is provided (not None):
it yields each time a tuple (X, y) where X is a list
of numpy arrays of images and y is a list of ints (labels).
The length of X and y is `chunk_size` at most (it can be smaller).
if y_array is not provided (it is None)
it yields each time X where X is a list of numpy arrays
of images. The length of X is `chunk_size` at most (it can be
smaller).
This is used for testing, where we don't have/need the labels.
The shape of each element of X in both cases
is (height, width, color), where color is 1 or 3 or 4 and height/width
vary according to examples (hence the fact that X is a list instead of
numpy array).
"""
from skimage.io import imread
from joblib import delayed
from joblib import Parallel
for i in range(0, len(X_array), chunk_size):
X_chunk = X_array[i:i + chunk_size]
filenames = [
os.path.join(folder, '{}'.format(x))
for x in X_chunk]
X = Parallel(n_jobs=n_jobs, backend='threading')(delayed(imread)(
filename) for filename in filenames)
if y_array is not None:
y = y_array[i:i + chunk_size]
yield X, y
else:
yield X
def _to_categorical(y, num_classes=None):
"""Convert a class vector (integers) to binary class matrix.
Taken from keras:
https://github.com/fchollet/keras/blob/master/keras/utils/np_utils.py
The reason it was taken from keras is to avoid importing theano which
clashes with pytorch.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input.
"""
y = np.array(y, dtype='int').ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes))
categorical[np.arange(n), y] = 1
return categorical
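# Illustrative usage (not part of the original module): quick check of the one-hot
# conversion defined above.
demo_onehot = _to_categorical([0, 2, 1], num_classes=3)
print(demo_onehot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]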
import numpy as np
from sklearn.linear_model import LinearRegression
import math
from itertools import combinations
def print_LR(X, w, y, add_bias = True, n = 5, verbose = False):
W = np.diag(w)
if verbose:
print(W)
Z1 = np.dot(np.dot(X.T,W), X)
Z2 = np.linalg.inv(Z1)
Z3 = np.dot(Z2, np.dot(X.T,W))
beta = np.dot(Z3, y)
if verbose:
print("Solution of regression:", beta)
print(beta[-2])
if add_bias:
weight_sum = np.sum(beta[:-1])
else:
weight_sum = np.sum(beta)
if verbose:
print("Sum of weights (w.o. bias) :", weight_sum)
if add_bias and verbose:
print("Bias :", beta[-1])
return beta[:-1]
def generate_X(n, interactions = [], add_bias = True):
'''
n : number of elements
interactions : [(0,1), (2,3), ..., (4,5)]
'''
X = []
m = n + len(interactions) + int(add_bias)
for i in range(n+1):
for index_array in list(combinations(range(n), i)):
x = np.zeros(m)
if len(index_array) > 0:
x[np.array(list(index_array))] = 1
for j, inter in enumerate(interactions):
if set(inter).issubset(set(index_array)):
x[j + n] = 1
if add_bias:
x[-1] = 1
X.append(x)
return np.array(X)
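# Illustrative usage (not part of the original module): recover known per-feature
# weights from a synthetic target defined over every on/off combination of 3 features.
X_demo = generate_X(3)                        # columns: f0, f1, f2, bias
true_beta = np.array([1.0, 2.0, 3.0, 0.5])    # last entry plays the role of the bias
y_demo = X_demo @ true_beta
w_demo = np.ones(len(X_demo))                 # uniform example weights
print(print_LR(X_demo, w_demo, y_demo))       # approximately [1. 2. 3.]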
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
import skopt as sk
from sklearn.model_selection import train_test_split
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras import backend as k
from sklearn.ensemble import RandomForestRegressor
import joblib
import stability as stab
from Data_processing import hough_theta, averaging_xy, threshold_theoretical
import multiprocessing as mp
def stability_generator(n_qd, res):
if n_qd == 2:
x, y, z, c, cc, dots = stab.stab_dqd(res)
return x, y, z, c, cc, dots
elif n_qd == 4:
x, y, z, c, cc, dots = stab.stab_fqd(res)
return x, y, z, c, cc, dots
else:
print('Error, n_qd can only take values of 2 or 4')
def stab_red(n_qd, res):
vg1, vg2, intensity, c, cc, dots = stability_generator(n_qd, res)
x, y, z, _, _, _ = threshold_theoretical(vg1, vg2, intensity)
xs, ys, _ = averaging_xy(x, y, z, int((len(x)) ** 0.5 * 10), int((len(x)) ** 0.2)) # Averaging points
return xs, ys, c, cc, dots
def line_to_theta(n_qd: int, res: int):
xs, ys, c, cc, dots = stab_red(n_qd, res)
theta = np.reshape(np.linspace(0, np.pi / 2, 500 + 1), (1, 500 + 1)) # Creating theta array
# frequency and theta values extracted from Hough transform
freq_t = hough_theta(np.transpose(np.vstack([xs, ys])), theta)
p = stab.analytical_grad(c, cc, dots)
return freq_t, p[:2]
def create_grad_library(n: int):
"""
Runs line_to_theta multiple times to create a pandas data-frame of histograms with corresponding gradients
@param n: number of inputs in data-frame
@return: pandas data-frame
"""
# TODO try to parallelize for loop
index = list(np.arange(0, 501 + 2, 1))
df = pd.DataFrame(columns=index)
for i in range(n):
try:
hist, grad = line_to_theta(int(2 * np.random.randint(1, 3, 1)), 300)
df = df.append(np.transpose(pd.DataFrame(np.append(hist, grad))))
except:
print('An error occurred')
return df
def save_df(df, name):
df.to_csv((str(name) + '.csv'), index=False)
def create_model(learning_rate, num_dense_layers, num_dense_nodes, dropout=0.05):
"""
Creates model architecture for a NN to train on given different parameters
@param learning_rate: learning rate of NN
@param num_dense_layers: number of layers in the NN
@param num_dense_nodes: number of nodes on each layer
@param dropout: dropout probability
@return: model to be trained
"""
# This model used ReLU for activation and a he_uniform initializer
model = Sequential()
model.add(Dense(501, activation='relu', input_shape=(501,), kernel_initializer=initializers.he_uniform()))
# create a loop making a new dense layer for the amount passed to this model.
# naming the layers helps avoid tensorflow error deep in the stack trace.
for i in range(num_dense_layers):
name = 'layer_dense_{0}'.format(i + 1)
model.add(Dense(num_dense_nodes,
activation='relu',
name=name,
kernel_initializer=initializers.he_uniform()
))
model.add(Dropout(dropout))
# add our classification layer.
model.add(Dense(3, activation='linear', kernel_initializer=initializers.he_uniform()))
# setup our optimizer and compile
adam = Adam(lr=learning_rate) # We use Adam as an optimiser
model.compile(optimizer=adam, loss='mean_squared_error', # MSE is the quantity we want to minimise
metrics=["mean_squared_error"])
return model
def bayesian_optimisation(x_train, y_train, param_0):
"""
Uses Bayesian optimisation to find optimal hyper-parameters to tune the NN
@param x_train: x values of training data
@param y_train: y values of training data
@param param_0: initial parameters
@return: optimised hyper-parameters for NN
"""
# Set boundaries within which optimized hyper-parameters need to be in
dim_learning_rate = sk.space.Real(low=1e-4, high=1e-2, prior='log-uniform',
name='learning_rate')
dim_num_dense_layers = sk.space.Integer(low=1, high=5, name='num_dense_layers')
dim_num_dense_nodes = sk.space.Integer(low=30, high=512, name='num_dense_nodes')
# dim_dropout = sk.space.Real(low=0, high=0.5, name='dropout')
dimensions = [dim_learning_rate,
dim_num_dense_layers,
dim_num_dense_nodes
# dim_dropout
]
# Function to evaluate validation MSE for given hyper-parameters
@sk.utils.use_named_args(dimensions=dimensions)
def fitness(learning_rate, num_dense_layers, num_dense_nodes, dropout=0.05):  # dropout keeps a default because its search dimension is commented out above
# create model with given hyper-parameters
model = create_model(learning_rate=learning_rate,
num_dense_layers=num_dense_layers,
num_dense_nodes=num_dense_nodes,
# dropout=dropout
)
# Train NN with given hyper-parameters
blackbox = model.fit(x=x_train.values, # named blackbox because it represents the structure
y=y_train.values,
epochs=30,
validation_split=0.15,
)
# return the MSE for the last epoch.
rms = blackbox.history['val_mean_squared_error'][-1]
print()
print("MSE: {}".format(rms))
print()
del model
# Clear the Keras session, otherwise it will keep adding new
# models to the same TensorFlow graph each time we create
# a model with a different set of hyper-parameters.
k.clear_session()
return rms
# Use GP optimization to find best set of hyper-parameters
gp_result = sk.gp_minimize(func=fitness,
dimensions=dimensions,
n_calls=500,
x0=param_0)
return gp_result
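# Illustrative note (assumption, not from the original code): `param_0` is forwarded to
# gp_minimize as x0, so it should list one starting value per search dimension, in the
# order defined above, e.g. param_0 = [1e-3, 3, 256] for
# (learning_rate, num_dense_layers, num_dense_nodes).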
def nn_training_data(name: str, split=0.10):
"""
Splits and normalises data_frame into train and test data
@param split: split between train and test data, default is 0.1
@param name: Name of pandas data frame to load
@return:
"""
# load data
df = pd.read_csv(name, index_col=False)
x, y = df.iloc[:, :501], df.iloc[:, 501:]
y = np.arctan(-1 / y)
# Split between training and test data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=split)
# normalise inputs by their per-feature standard deviation and rescale targets by 2/pi
r_x, y_train = np.std(x_train.values, axis=0), y_train * 2 / np.pi
x_train, x_test = x_train / r_x, x_test / r_x
return x_train, y_train, x_test, y_test, r_x
def RF_training(name: str, split=0.1):
# load data
x_train, y_train, x_test, y_test, r_x = nn_training_data(name, split)
rf = RandomForestRegressor(n_estimators=1000, random_state=42)
rf.fit(x_train.values, y_train.values)
return rf
def fit_model(model, x_train, y_train, val_split=0.15):
black_box = model.fit(x_train.values, y_train.values, epochs=100, verbose=0, validation_split=val_split)
return black_box
def predict_model(model, x_test):
return model.predict(x_test.values) * np.pi / 2
def evaluation(black_box):
"""
Plots learning curve of trained model
@param black_box: trained model
@return:
"""
history = black_box.history
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(history['mean_squared_error'], c='k')
plt.plot(history['val_mean_squared_error'], c='r')
plt.xlabel('Epochs', fontsize=18)
plt.ylabel('MSE', fontsize=18)
plt.subplot(1, 2, 2)
plt.plot(history['loss'], c='k')
plt.plot(history['val_loss'], c='r')
plt.xlabel('Epochs', fontsize=18)
plt.ylabel('Loss', fontsize=18)
plt.legend(('Test', 'Validation'))
plt.tight_layout()
return history
def save_model(name, model, r_x):
model.save((name + '.h5'))
drx = pd.DataFrame(r_x)
drx.to_csv(('rx_' + name + '.csv'), index=False)
def load_model(name):
model = tf.keras.models.load_model((name + '.h5'))
r_x = pd.read_csv(('rx_' + name + '.csv'), index_col=False)
return model, r_x
def predict_exp(model, freq, r_x):
index = list(np.arange(0, 501, 1))
df = pd.DataFrame(columns=index)
df = df.append(np.transpose(pd.DataFrame(freq)))
df = df / np.transpose(r_x.values)
t = model.predict(df.values) * np.pi / 2
return -1 / np.tan(t[0])
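# Note (illustrative): this inverts the target transform used in nn_training_data,
# where gradients y were mapped to angles via t = arctan(-1 / y), hence y = -1 / tan(t).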
def load_all_models(n_models):
all_models = list()
reg_tree = joblib.load('models/tree_reg.sav')
all_models.append(reg_tree)
print('>loaded %s' % reg_tree)
for i in range(n_models):
# define filename for this ensemble
NN1 = 'models/NN_' + str(i) + '.h5'
# NN2 = 'models/NN_' + str(i*2+1) + '.h5'
RF = 'models/RF_' + str(i) + '.sav'
# load model from file
nn1 = tf.keras.models.load_model(NN1)
# nn2 = tf.keras.models.load_model(NN2)
rf = joblib.load(RF)
# add to list of members
all_models.append(nn1)
# all_models.append(nn2)
all_models.append(rf)
print('>loaded %s' % nn1)
# print('>loaded %s' % nn2)
print('>loaded %s' % rf)
return all_models
def stacked_predict(members, val):
predict = None
err_one, err_two = [], []
X = tf.constant(val)
i = 0
for model in members:
# make prediction
# stack predictions into [rows, members, probabilities]
if predict is None:
predict = model.predict(val)
else:
if i%2 == 0:
er_o, er_t = pred_ints(model, val)
err_one, err_two = np.append(err_one, er_o), np.append(err_two, er_t)
#!/usr/bin/env python3
import gc
import os
import sys
import matplotlib
matplotlib.use("Agg") # https://stackoverflow.com/questions/37604289
import numpy as np
import pandas as pd
from itertools import product
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pathlib import Path
from scipy.spatial.distance import pdist
import fennec
from fennec._utils import (
boxplot,
draw_2D_plot,
extract_cluster_silhouette,
isinteractive,
list_models,
load_models,
merge_models,
myKernelPCA,
pcacomp_to_model,
reassign_tiny_cluster_mustlink,
run_vbgmm,
)
# - check is we are in the correct conda env
if os.environ["CONDA_DEFAULT_ENV"] != "fennec2-dev":
raise Exception("Not in correct conda environment")
os.environ["OPENBLAS_NUM_THREADS"] = "16"
os.environ["MKL_NUM_THREADS"] = "16"
# -- functions in development ---------------------------------------------------
def _nonredondant_pairwise_index(x):
for i in range(len(x)):
for j in range(i, len(x)):
yield i, j
def _count_must_link_np(ci, cj):
"""Count number of mustlink for cluster `ci` and `cj`"""
import numpy as np
global D_ml, curated_vbgmm_clus
return D_ml[
np.ix_(
np.where(curated_vbgmm_clus == ci)[0], np.where(curated_vbgmm_clus == cj)[0]
)
].count_nonzero()
def _mysquareform(x, l):
"""Thanks to @pidupuis"""
import numpy as np
m = np.zeros([l, l]).astype(int)
xs, ys = np.triu_indices(l)
m[xs, ys] = m[ys, xs] = x
return m
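# Example (illustrative): _mysquareform([1, 2, 3, 4, 5, 6], l=3) fills the upper
# triangle row by row and mirrors it:
#   [[1, 2, 3],
#    [2, 4, 5],
#    [3, 5, 6]]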
def get_nb_mustlink_per_cluster(clus, D_ml, n_jobs=1, verbose=False):
"""
Return number of mustlink for each pair of cluster
"""
## (incredibly ugly) list comprehension
# return pd.DataFrame(data=_mysquareform([
# D_ml[
# np.ix_(
# np.where(clus == ci)[0],
# np.where(clus == cj)[0]
# )
# ].count_nonzero()
# for ci, cj in _nonredondant_pairwise_index(categ)],
# l=len(categ)).reshape(len(categ), len(categ)),
# index=categ, columns=categ)
import numpy as np
import pandas as pd
from time import sleep
from concurrent.futures import ProcessPoolExecutor
categ = np.unique(clus)
jobs = []
with ProcessPoolExecutor(max_workers=n_jobs) as pe:
for ci, cj in _nonredondant_pairwise_index(categ):
jobs.append(pe.submit(_count_must_link_np, categ[ci], categ[cj]))
if verbose:
while True:
nb_f = 0
for j in jobs:
if j.done():
nb_f += 1
if nb_f >= len(jobs):
print("100% finised")
break
else:
perc_f = nb_f / len(jobs) * 100
print("%05.2f%%" % perc_f, end="\r")
sleep(1)
cnt_ml = pd.DataFrame(
data=_mysquareform([j.result() for j in jobs], l=len(categ)).reshape(
len(categ), len(categ)
),
index=categ,
columns=categ,
)
return cnt_ml
def extract_unlink_clusters(curated_vbgmm_clus, D_ml, tol=0.9, verbose=True):
"""
If 2 clusters from `curated_vbgmm_clus` have at least 90% of must-link link from
`D_ml`, they are dropped from the cluster list and set as `remaining_ids`.
"""
import numpy as np, os
os.makedirs(f"{vbgmm_input_dir}/mustlink_info/", exist_ok=True)
cnt_ml = get_nb_mustlink_per_cluster(
curated_vbgmm_clus, D_ml, verbose=verbose, n_jobs=128
)
perc_ml = np.diag(cnt_ml) / cnt_ml.sum()
pd.concat([cnt_ml, perc_ml])
cnt_ml.to_csv(f"{vbgmm_input_dir}/mustlink_info/iter{n}_nb_mustlink.csv")
clustokeep = cnt_ml.index[perc_ml >= tol]
remaining_ids = cnt_ml.index[perc_ml < tol]
# return values
# validated clusters
retA = curated_vbgmm_clus[np.isin(curated_vbgmm_clus, clustokeep)]
# remainings ids
retB = curated_vbgmm_clus[np.isin(curated_vbgmm_clus, remaining_ids)]
# max nb of cluster
retC = len(retA)
return retA, retB, retC
# -------------------------------------------------------------------------------#
# -- SCRIPT STARTS HERE ---------------------------------------------------------#
# -------------------------------------------------------------------------------#
if isinteractive(): # debug args if script is run in python shell
h5file, label, overlap, init_type, mode, models_str = (
"DATA/S.completedata.l1000c10000o0.h5",
"S",
"0",
"mustlink",
"reassigntiny",
# "contig2vec4",
# "kmers4,contig2vec4,contig2vec6,ind15", # kmers5,
"contig2vec4,contig2vec6,cov_gattaca31,ind15,kmers1001001,kmers110011,kmers3,kmers4",
)
else:
if len(sys.argv) != 7:
raise Exception(
"usage: python3 fennec_cluster_extraction_pipeline.py <file.h5> <label> <overlap> <init_type> <mode> <model1,model2,modelN>"
)
else:
_, h5file, label, overlap, init_type, mode, models_str = sys.argv
assert init_type in ("kmeans", "mustlink"), "init_type is incorrect, got {}".format(
init_type
)
assert mode in ("nopostprocessing", "reassigntiny"), "mode is incorrect, got {}".format(
mode
)
# -- user input
# vbgmm_input_dir = f"run.{label}.output/"
vbgmm_input_dir = "/".join(
["NEW_FENNEC_RESULTS", label, overlap, init_type, mode, models_str, ""]
)
min_length = 1000 # minimum sequence length
max_cluster = 100 # maximum number of cluster to extract
max_iter = 10 # maximum number of iteration
# -- variables
wanted_models = models_str.split(",")
print(f"Models: {wanted_models}")
# -- check what type of input we have (probably h5 or fasta)
if not os.path.isfile(h5file):
print("[ERROR] can not find file '%s'. Exiting." % h5file)
# See: `fennec_sequence_characterization.py`
sys.exit(1)
# -- load data
raw_models, remaining_ids, D_ml = load_models(h5file, wanted_models)
print([(i, d.shape[1]) for i, d in raw_models.items()])
# -- set some parameters
kpca_params = {
"inertia": 0.85,
"n_jobs": 128,
"verbose": 3,
"t": 0.25,
} # see: help(myKernelPCA)
min_nb_seq = 50 # default value, will be updated later
max_pca_dim = min(250, sum([d.shape[1] for d in raw_models.values()]) // 3)
problematic_cluster = {} # final cluster (id: [seqid])
HISTORY = {} # store data, pca, clusterings, filtered clusterings, etc.
n = 0 # current iteration
# -- open report files
print(f"[INFO] Results will be saved in '{vbgmm_input_dir}'")
os.makedirs(vbgmm_input_dir, exist_ok=True)
os.makedirs(f"{vbgmm_input_dir}/comporigins/", exist_ok=True)
os.makedirs(f"{vbgmm_input_dir}/mustlink_info/", exist_ok=True)
pdf = PdfPages(
vbgmm_input_dir + "/vbgmm_iterative_extraction_" + label + ".pdf", keep_empty=False
)
# -- dev parameters
devmode = False # if True, stop at each iteration
force_gc = True # force garbage collection at the end of each iteration
draw_plot = True # draw 2D plot of each clustering (take some times)
# --------------------------- #
# -- main loop starts here -- #
# --------------------------- #
while True:
print(f"[INFO] --- ITERATION {n} {'-'*60}")
print(f"[INFO] Number of sequence to process: {len(remaining_ids)}")
# -- check if we have to continue
if n >= max_iter:
print("[END] It's already been %d iterations! Exiting." % n)
problematic_cluster["maxiter_" + str(n)] = remaining_ids
break
if max_cluster <= 1:
print("[END] I will search for only %d clusters Exiting." % max_cluster)
problematic_cluster["lastcluster_" + str(n)] = remaining_ids
break
# -- select features
print("[INFO] Merging models")
D, pca_components, pca_explained_variance_ratio, n_comp = merge_models(
raw_models, remaining_ids, pca_inertia=0.99, kpca_params=kpca_params
)
print("[INFO] Merging models produced %d components" % n_comp)
# - exporting the PCA explained variance ratio
with open(f"{vbgmm_input_dir}/pca_explvarratio.txt", "a") as outfile:
print(n, pca_explained_variance_ratio, file=outfile)
# -- check if we have to continue
if len(remaining_ids) < min_nb_seq:
print(
"[END] There is only %d sequences remaining (minimum %d per cluster). Exiting."
% (len(D), min_nb_seq)
)
problematic_cluster["notenough_" + str(n)] = remaining_ids
break
if n_comp > max_pca_dim:
print(
"[END] PCA produced %d components for %.2f %% of inertia (maximum is %d). Exiting."
% (n_comp, 100 * 0.9999, max_pca_dim)
)
problematic_cluster["unbinned_" + str(n)] = remaining_ids
break
# -- which models are used?
for c in range(min(10, n_comp)): # 10 first components
pcacomp_to_model(
D[c],
raw_models,
n,
outfile=f"{vbgmm_input_dir}/comporigins/pca_components_origin_comp{c}.csv",
)
# - reindex D_ml to fit the current `D.index`
print("[INFO] Reindexing must-link matrix")
argid = np.array([D.index.get_loc(x) for x in remaining_ids])
msh = np.meshgrid(argid, argid)
D_ml_sub = D_ml[msh[0], msh[1]]
# TODO: Check for "clusterability" before clustering (see: https://github.com/alimuldal/diptest)
# -- clustering
vbgmm_clus = run_vbgmm(
D,
pca_components,
D_ml_sub,
vbgmm_input_dir,
max_cluster,
min_length,
n,
seed=666,
init_type=init_type,
epsilon=1e-2,
verbose=2,
)
assert vbgmm_clus.shape[0] == len(
remaining_ids
), "[ERROR] Not all sequences were clustered!"
# -- clustering post processing
# - drop tiny cluster (reassign sequences eventually)
if mode == "reassigntiny":
curated_vbgmm_clus = reassign_tiny_cluster_mustlink(
vbgmm_clus, D_ml_sub, verbose=True
)
elif mode == "nopostprocessing":
curated_vbgmm_clus = vbgmm_clus
np.unique(curated_vbgmm_clus).size
# - TODO: merge clusters with enough must-link relationships
# _, _, _ = extract_unlink_clusters(
# curated_vbgmm_clus, D_ml_sub, verbose=True
# )
# count must link relationships between clusters
cnt_ml = get_nb_mustlink_per_cluster(
curated_vbgmm_clus, D_ml, verbose=True, n_jobs=128
)
perc_ml = np.diag(cnt_ml) / cnt_ml.sum()
from core.dynamics import AffineDynamics, ConfigurationDynamics, LearnedDynamics, PDDynamics, ScalarDynamics
from core.systems import Segway
from core.controllers import Controller, FBLinController, LQRController, FilterController, PDController, QPController
from core.util import differentiate
import matplotlib
from matplotlib.pyplot import cla, clf, figure, grid, legend, plot, savefig, show, subplot, title, xlabel, ylabel
from numpy import array, concatenate, dot, identity, linspace, ones, savetxt, size, sqrt, zeros
from numpy.random import uniform,seed
import tensorflow as tf
import numpy as np
import pdb
import pickle
import os
from tensorflow.python.client import device_lib
from utils.SegwaySupport import initializeSystem, initializeSafetyFilter, simulateSafetyFilter, SafetyAngleAngleRate
from utils.Plotting import plotTestStates, plotTrainStates, plotTrainMetaData, plotPhasePlane, plotLearnedCBF
from utils.AuxFunc import findSafetyData, findLearnedSafetyData_nn, postProcessEpisode, shuffle_downsample, standardize, generateInitialPoints
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from core.dynamics import LearnedAffineDynamics
from core.learning.keras import KerasResidualAffineModel
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Add, Dense, Dot, Input, Reshape, Lambda
class LearnedSegwaySafetyAAR_NN(LearnedAffineDynamics):
"""
Class to setup the CBF and derivatives
"""
def __init__(self, segway_safety_aar, scalar_res_aff_model):
self.dynamics = segway_safety_aar
self.res_model = scalar_res_aff_model
def process_drift(self, x, t):
dhdx = self.dynamics.dhdx( x, t )
return concatenate([x, dhdx])
def process_act(self, x, t):
dhdx = self.dynamics.dhdx( x, t )
return concatenate([x, dhdx])
def init_data(self, d_drift_in, d_act_in, m, d_out):
return [zeros((0, d_drift_in)), zeros((0, d_act_in)), zeros((0, m)), zeros(0)]
# Keras Residual Scalar Affine Model Definition
class KerasResidualScalarAffineModel(KerasResidualAffineModel):
def __init__(self, d_drift_in, d_act_in, d_hidden, m, d_out, us_std, optimizer='sgd', loss='mean_absolute_error'):
drift_model = Sequential()
drift_model.add(Dense(d_hidden, input_shape=(d_drift_in,), activation='relu'))
drift_model.add(Dense(d_out))
self.drift_model = drift_model
self.us_std = us_std
drift_inputs = Input((d_drift_in,))
drift_residuals = self.drift_model(drift_inputs)
act_model = Sequential()
act_model.add(Dense(d_hidden, input_shape=(d_act_in,), activation='relu'))
act_model.add(Dense(d_out * m))
act_model.add(Reshape((d_out, m)))
self.act_model = act_model
act_inputs = Input((d_act_in,))
act_residuals = self.act_model(act_inputs)
us = Input((m,))
residuals = Add()([drift_residuals, Dot([2, 1])([act_residuals, Lambda(lambda x: x/self.us_std)(us) ])])
model = Model([drift_inputs, act_inputs, us], residuals)
model.compile(optimizer, loss)
self.model = model
self.input_mean = None
self.input_std = None
def eval_drift(self, drift_input):
#print(drift_input,self.input_mean,self.input_std)
#print(array([(drift_input-self.input_mean)/self.input_std]))
return self.drift_model.predict(array([(drift_input-self.input_mean)/self.input_std]))[0][0]
def eval_act(self, act_input):
return self.act_model.predict(array([(act_input-self.input_mean)/self.input_std]))[0][0]/self.us_std
# Combined Controller
class CombinedController(Controller):
def __init__(self, controller_1, controller_2, weights):
self.controller_1 = controller_1
self.controller_2 = controller_2
self.weights = weights
def eval(self, x, t):
u_1 = self.controller_1.process( self.controller_1.eval( x, t ) )
u_2 = self.controller_2.process( self.controller_2.eval( x, t ) )
return self.weights[ 0 ] * u_1 + self.weights[ 1 ] * u_2
def standardize(data_train):
"""
Standardize a dataset to have zero mean and unit standard deviation.
:param data_train: 2-D Numpy array. Training data.
:param data_test: 2-D Numpy array. Test data.
:return: (train_set, test_set, mean, std), The standardized dataset and
their mean and standard deviation before processing.
"""
std = np.std(data_train, 0, keepdims=True)
std[std == 0] = 1
mean = np.mean(data_train, 0, keepdims=True)
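# NOTE (illustrative, not from the original source -- the file is truncated here):
# a standardisation routine of this shape typically finishes by returning the centred
# and scaled data together with the statistics, e.g.
#   return (data_train - mean) / std, mean, std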