| prompt | completion | api |
|---|---|---|
| stringlengths 76 to 405k | stringlengths 7 to 146 | stringlengths 10 to 61 |
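All three columns are strings; below, each row concatenates a long code prompt, the completion to predict, and the fully qualified API name. As a minimal sketch (not part of the dataset itself), such a table could be inspected with pandas, assuming the rows were exported to a hypothetical `data.parquet` file:

```python
import pandas as pd

# Hypothetical export of the table; the real file name and format may differ.
df = pd.read_parquet("data.parquet")

print(df.columns.tolist())              # ['prompt', 'completion', 'api']
print(df["api"].value_counts().head())  # e.g. pandas.DataFrame, ...
print(df.loc[0, "completion"])          # the next API call to predict
```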
#!/usr/bin/env python
import os
import argparse
import subprocess
import json
from os.path import isfile, join, basename
import time
import pandas as pd
from datetime import datetime
import tempfile
import sys
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_generator')))
import route_gen


def main():
    '''
    The benchmark algorithm works as follows:
    For a certain number of iterations:
        generate an instance with the default generator values
        for each encoding inside the subfolders of encoding (one folder per encoding):
            start timer
            solve with clingo
            stop timer
            test the solution:
                if legal
                    add the time in a csv (S)
                else:
                    add int max as the time
                    print an error message
    '''
    parser = argparse.ArgumentParser(description='Benchmark ! :D')
    parser.add_argument('--runs', type=int, help="the number of runs of the benchmark")
    parser.add_argument('--no_check', action='store_true', help="skip checking the solution (in case of an optimization problem)")
    args = parser.parse_args()
    number_of_run = args.runs
    print("Start of the benchmarks")
    encodings = [x for x in os.listdir("../encoding/")]
    print("Encodings to test:")
    for encoding in encodings:
        print("\t-{}".format(encoding))
    results = []
    costs_run = []
    for i in range(number_of_run):
        print("Iteration {}".format(i + 1))
        result_iteration = dict()
        cost_iteration = dict()
        instance, minimal_cost = route_gen.instance_generator()
        # we get the upper bound of the solution generated by the generator
        cost_iteration["Benchmark_Cost"] = minimal_cost
        correct_solution = True
        instance_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
        instance_temp.write(repr(instance))
        instance_temp.flush()
        for encoding in encodings:
            print("Encoding {}:".format(encoding))
            files_encoding = ["../encoding/" + encoding + "/" + f for f in os.listdir("../encoding/" + encoding) if isfile(join("../encoding/" + encoding, f))]
            start = time.time()
            try:
                if 'parallel' == encoding:
                    clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"] + ['-t 8compete'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                else:
                    clingo = subprocess.Popen(["clingo"] + files_encoding + [basename(instance_temp.name)] + ["--outf=2"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                (stdoutdata, stderrdata) = clingo.communicate(timeout=3600)
                clingo.wait()
                end = time.time()
                duration = end - start
                json_answers = json.loads(stdoutdata)
                cost = float('inf')
                answer = []
                # we need to check all solutions and get the best one
                for call_current in json_answers["Call"]:
                    if "Witnesses" in call_current:
                        answer_current = call_current["Witnesses"][-1]
                        if "Costs" in answer_current:
                            current_cost = sum(answer_current["Costs"])
                            if current_cost < cost:
                                answer = answer_current["Value"]
                                cost = current_cost
                        else:
                            cost = 0
                            answer = answer_current["Value"]
                # we append "" just to get the final "." when we join later
                answer = answer + [""]
                answer_str = ".".join(answer)
                answer_temp = tempfile.NamedTemporaryFile(mode="w+", suffix='.lp', dir=".", delete=False)
                answer_temp.write(answer_str)
                # flush so the file is fully written before clingo reads it
                answer_temp.flush()
                clingo_check = subprocess.Popen(
                    ["clingo"] + ["../test_solution/test_solution.lp"] + [basename(answer_temp.name)] + [
                        basename(instance_temp.name)] + ["--outf=2"] + ["-q"], stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                (stdoutdata_check, stderrdata_check) = clingo_check.communicate()
                clingo_check.wait()
                json_check = json.loads(stdoutdata_check)
                answer_temp.close()
                os.remove(answer_temp.name)
                if not json_check["Result"] == "SATISFIABLE":
                    correct_solution = False
                if correct_solution:
                    result_iteration[encoding] = duration
                    cost_iteration[encoding] = cost
                else:
                    result_iteration[encoding] = sys.maxsize
                    cost_iteration[encoding] = float("inf")
                print("\tSatisfiable {}".format(correct_solution))
                print("\tDuration {} seconds".format(result_iteration[encoding]))
                print("\tBest solution {}".format(cost))
                print("\tBenchmark cost {}".format(minimal_cost))
            except Exception as excep:
                result_iteration = str(excep)
                cost_iteration = float('inf')
        results.append(result_iteration)
        costs_run.append(cost_iteration)
        instance_temp.close()
        os.remove(basename(instance_temp.name))
    df = | pd.DataFrame(results) | pandas.DataFrame |
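The first prompt is cut off at the point where the timings are turned into a DataFrame. Its docstring says the times end up in a csv, so a plausible final step is sketched below; the helper name and the output file names are assumptions, not part of the original row.

```python
import pandas as pd

def save_benchmark_results(results, costs_run):
    # Hypothetical helper matching the docstring's "add the time in a csv" step;
    # the file names are assumptions, not part of the original script.
    pd.DataFrame(results).to_csv("benchmark_times.csv", index=False)
    pd.DataFrame(costs_run).to_csv("benchmark_costs.csv", index=False)

# e.g. called at the end of main(): save_benchmark_results(results, costs_run)
```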
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : ioutil.py
@Desc : Input and output data functions.
'''
# here put the import lib
import os
import sys
import pandas as pd
import numpy as np
from . import TensorData
import csv
from .basicutil import set_trace


class File():
    def __init__(self, filename, mode, idxtypes):
        self.filename = filename
        self.mode = mode
        self.idxtypes = idxtypes
        self.dtypes = None
        self.sep = None

    def get_sep_of_file(self):
        '''
        return the separator of the line.
        :param infn: input file
        '''
        sep = None
        fp = open(self.filename, self.mode)
        for line in fp:
            line = line.decode(
                'utf-8') if isinstance(line, bytes) else line
            if (line.startswith("%") or line.startswith("#")):
                continue
            line = line.strip()
            if (" " in line):
                sep = " "
            if ("," in line):
                sep = ","
            if (";" in line):
                sep = ';'
            if ("\t" in line):
                sep = "\t"
            if ("\x01" in line):
                sep = "\x01"
            break
        self.sep = sep

    def transfer_type(self, typex):
        if typex == float:
            _typex = 'float'
        elif typex == int:
            _typex = 'int'
        elif typex == str:
            _typex = 'object'
        else:
            _typex = 'object'
        return _typex

    def _open(self, **kwargs):
        pass

    def _read(self, **kwargs):
        pass


class TensorFile(File):
    def _open(self, **kwargs):
        if 'r' not in self.mode:
            self.mode += 'r'
        f = open(self.filename, self.mode)
        pos = 0
        cur_line = f.readline()
        while cur_line.startswith("#"):
            pos = f.tell()
            cur_line = f.readline()
        f.seek(pos)
        _f = open(self.filename, self.mode)
        _f.seek(pos)
        fin = pd.read_csv(f, sep=self.sep, **kwargs)
        column_names = fin.columns
        self.dtypes = {}
        if not self.idxtypes is None:
            for idx, typex in self.idxtypes:
                self.dtypes[column_names[idx]] = self.transfer_type(typex)
            fin = pd.read_csv(_f, dtype=self.dtypes, sep=self.sep, **kwargs)
        else:
            fin = pd.read_csv(_f, sep=self.sep, **kwargs)
        return fin

    def _read(self, **kwargs):
        tensorlist = []
        self.get_sep_of_file()
        _file = self._open(**kwargs)
        if not self.idxtypes is None:
            idx = [i[0] for i in self.idxtypes]
            tensorlist = _file[idx]
        else:
            tensorlist = _file
        return tensorlist


class CSVFile(File):
    def _open(self, **kwargs):
        f = pd.read_csv(self.filename, **kwargs)
        column_names = list(f.columns)
        self.dtypes = {}
        if not self.idxtypes is None:
            for idx, typex in self.idxtypes:
                self.dtypes[column_names[idx]] = self.transfer_type(typex)
            f = pd.read_csv(self.filename, dtype=self.dtypes, **kwargs)
        else:
            f = pd.read_csv(self.filename, **kwargs)
        return f

    def _read(self, **kwargs):
        tensorlist = | pd.DataFrame() | pandas.DataFrame |
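For context, a small usage sketch of the readers defined in this second prompt; the file name and column layout are illustrative assumptions, and `CSVFile._read` is truncated at the DataFrame construction above.

```python
# Illustrative only: a CSV whose first three columns are two int ids and a float.
idxtypes = [(0, int), (1, int), (2, float)]
reader = CSVFile("edges.csv", "r", idxtypes)   # hypothetical input file
frame = reader._open(header=None)              # dtype-corrected pandas DataFrame
print(frame.dtypes)
```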
import logging
import os
import pickle
import tarfile
from typing import Tuple

import numpy as np
import pandas as pd
import scipy.io as sp_io
import shutil
from scipy.sparse import csr_matrix, issparse

from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download

logger = logging.getLogger(__name__)


class ATACDataset(GeneExpressionDataset):
    """Loads a file from the `10x`_ website.

    :param dataset_name: Name of the dataset file. Has to be one of:
        "CellLineMixture", "AdBrainCortex", "P0_BrainCortex".
    :param save_path: Location to use when saving/loading the data.
    :param type: Either `filtered` data or `raw` data.
    :param dense: Whether to load as dense or sparse.
        If False, data is cast to sparse using ``scipy.sparse.csr_matrix``.
    :param measurement_names_column: column in which to find measurement names in the corresponding `.tsv` file.
    :param remove_extracted_data: Whether to remove extracted archives after populating the dataset.
    :param delayed_populating: Whether to populate the dataset with a delay.

    Examples:
        >>> atac_dataset = ATACDataset(RNA_data, gene_name, cell_name)
    """

    def __init__(
        self,
        ATAC_data: np.matrix = None,
        ATAC_name: pd.DataFrame = None,
        cell_name: pd.DataFrame = None,
        delayed_populating: bool = False,
        is_filter = True,
        datatype="atac_seq",
    ):
        if ATAC_data.all() == None:
            raise Exception("Invalid input, the gene expression matrix is empty!")
        self.ATAC_data = ATAC_data
        self.ATAC_name = ATAC_name
        self.cell_name = cell_name
        self.is_filter = is_filter
        self.datatype = datatype
        self.cell_name_formulation = None
        self.atac_name_formulation = None
        if not isinstance(self.ATAC_name, pd.DataFrame):
            self.ATAC_name = | pd.DataFrame(self.ATAC_name) | pandas.DataFrame |
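A minimal construction sketch for this third prompt, mirroring the class docstring's example; the toy peak matrix and names are invented for illustration, and the remainder of `__init__` is truncated above.

```python
import numpy as np
import pandas as pd

# Toy inputs (3 cells x 4 ATAC peaks) purely for illustration.
ATAC_data = np.matrix(np.random.poisson(1.0, size=(3, 4)))
ATAC_name = pd.DataFrame(["chr1_100_500", "chr1_900_1400",
                          "chr2_200_700", "chr2_800_1300"])
cell_name = pd.DataFrame(["cell_1", "cell_2", "cell_3"])

atac_dataset = ATACDataset(ATAC_data=ATAC_data, ATAC_name=ATAC_name,
                           cell_name=cell_name)
```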
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import clone
import warnings
import re
import monkey as mk
mk.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClassifier
from sklearn import model_selection
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
from joblib import Partotal_allel, delayed
import multiprocessing
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# this block of code is for the connection between the server, the database, and the client (plus routing)
# access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global StanceTest
StanceTest = False
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global yData
yData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global definal_item_tailsParams
definal_item_tailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
global RetrieveModelsList
RetrieveModelsList = []
global total_allParametersPerfCrossMutr
total_allParametersPerfCrossMutr = []
global total_all_classifiers
total_all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global targetting_names
targetting_names = []
global keyFirstTime
keyFirstTime = True
global targetting_namesLoc
targetting_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global fileName
fileName = []
global listofTransformatingions
listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
return 'The reset was done!'
# retrieve data from client and select the correct data set
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def retrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
global DataResultsRawExternal
global DataRawLengthExternal
global fileName
fileName = []
fileName = request.getting_data().decode('utf8').replacing("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global keepOriginalFeatures
keepOriginalFeatures = []
global XData
XData = []
global XDataNoRemoval
XDataNoRemoval = []
global XDataNoRemovalOrig
XDataNoRemovalOrig = []
global previousState
previousState = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global finalResultsData
finalResultsData = []
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global definal_item_tailsParams
definal_item_tailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global total_allParametersPerfCrossMutr
total_allParametersPerfCrossMutr = []
global HistoryPreservation
HistoryPreservation = []
global total_all_classifiers
total_all_classifiers = []
global crossValidation
crossValidation = 8
#crossValidation = 5
#crossValidation = 3
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global targetting_names
targetting_names = []
global keyFirstTime
keyFirstTime = True
global targetting_namesLoc
targetting_namesLoc = []
global featureCompareData
featureCompareData = []
global columnsKeep
columnsKeep = []
global columnsNewGen
columnsNewGen = []
global columnsNames
columnsNames = []
global listofTransformatingions
listofTransformatingions = ["r","b","zs","mms","l2","l1p","l10","e2","em1","p2","p3","p4"]
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
targetting_names.adding('Healthy')
targetting_names.adding('Diseased')
elif data['fileName'] == 'biodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
CollectionDBExternal = mongo.db.biodegCExt.find()
targetting_names.adding('Non-biodegr.')
targetting_names.adding('Biodegr.')
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
targetting_names.adding('Negative')
targetting_names.adding('Positive')
elif data['fileName'] == 'MaterialC':
CollectionDB = mongo.db.MaterialC.find()
targetting_names.adding('Cylinder')
targetting_names.adding('Disk')
targetting_names.adding('Flatellipsold')
targetting_names.adding('Longellipsold')
targetting_names.adding('Sphere')
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
targetting_names.adding('No-use')
targetting_names.adding('Long-term')
targetting_names.adding('Short-term')
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
targetting_names.adding('Van')
targetting_names.adding('Car')
targetting_names.adding('Bus')
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
targetting_names.adding('Fine')
targetting_names.adding('Superior')
targetting_names.adding('Inferior')
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.adding(item)
DataRawLength = length(DataResultsRaw)
DataResultsRawTest = []
DataResultsRawExternal = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.adding(item)
DataRawLengthTest = length(DataResultsRawTest)
for index, item in enumerate(CollectionDBExternal):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawExternal.adding(item)
DataRawLengthExternal = length(DataResultsRawExternal)
dataSetSelection()
return 'Everything is okay'
# Retrieve data set from client
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def sendToServerData():
uploadedData = request.getting_data().decode('utf8').replacing("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = clone.deepclone(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
targetting = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True)
DataResults.sort(key=lambda x: x[targetting], reverse=True)
for dictionary in DataResults:
del dictionary[targetting]
global AllTargettings
global targetting_names
global targetting_namesLoc
AllTargettings = [o[targetting] for o in DataResultsRaw]
AllTargettingsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargettings):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
targetting_names.adding(value)
else:
pass
if (value == previous):
AllTargettingsFloatValues.adding(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
targetting_names.adding(value)
else:
pass
AllTargettingsFloatValues.adding(Class)
previous = value
ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargettingsFloatValues
global XDataStored, yDataStored
XDataStored = XData.clone()
yDataStored = yData.clone()
global XDataStoredOriginal
XDataStoredOriginal = XData.clone()
global finalResultsData
finalResultsData = XData.clone()
global XDataNoRemoval
XDataNoRemoval = XData.clone()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.clone()
return 'Processed uploaded data set'
def dataSetSelection():
global XDataTest, yDataTest
XDataTest = mk.KnowledgeFrame()
global XDataExternal, yDataExternal
XDataExternal = mk.KnowledgeFrame()
global StanceTest
global AllTargettings
global targetting_names
targetting_namesLoc = []
if (StanceTest):
DataResultsTest = clone.deepclone(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
targetting = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[targetting], reverse=True)
DataResultsTest.sort(key=lambda x: x[targetting], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[targetting]
AllTargettingsTest = [o[targetting] for o in DataResultsRawTest]
AllTargettingsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargettingsTest):
if (i == 0):
previous = value
targetting_namesLoc.adding(value)
if (value == previous):
AllTargettingsFloatValuesTest.adding(Class)
else:
Class = Class + 1
targetting_namesLoc.adding(value)
AllTargettingsFloatValuesTest.adding(Class)
previous = value
ArrayDataResultsTest = mk.KnowledgeFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargettingsFloatValuesTest
DataResultsExternal = clone.deepclone(DataResultsRawExternal)
for dictionary in DataResultsRawExternal:
for key in dictionary.keys():
if (key.find('*') != -1):
targetting = key
continue
continue
DataResultsRawExternal.sort(key=lambda x: x[targetting], reverse=True)
DataResultsExternal.sort(key=lambda x: x[targetting], reverse=True)
for dictionary in DataResultsExternal:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[targetting]
AllTargettingsExternal = [o[targetting] for o in DataResultsRawExternal]
AllTargettingsFloatValuesExternal = []
previous = None
Class = 0
for i, value in enumerate(AllTargettingsExternal):
if (i == 0):
previous = value
targetting_namesLoc.adding(value)
if (value == previous):
AllTargettingsFloatValuesExternal.adding(Class)
else:
Class = Class + 1
targetting_namesLoc.adding(value)
AllTargettingsFloatValuesExternal.adding(Class)
previous = value
ArrayDataResultsExternal = mk.KnowledgeFrame.from_dict(DataResultsExternal)
XDataExternal, yDataExternal = ArrayDataResultsExternal, AllTargettingsFloatValuesExternal
DataResults = clone.deepclone(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
targetting = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[targetting], reverse=True)
DataResults.sort(key=lambda x: x[targetting], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[targetting]
AllTargettings = [o[targetting] for o in DataResultsRaw]
AllTargettingsFloatValues = []
global fileName
data = json.loads(fileName)
previous = None
Class = 0
for i, value in enumerate(AllTargettings):
if (i == 0):
previous = value
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
targetting_names.adding(value)
else:
pass
if (value == previous):
AllTargettingsFloatValues.adding(Class)
else:
Class = Class + 1
if (data['fileName'] == 'IrisC' or data['fileName'] == 'BreastC'):
targetting_names.adding(value)
else:
pass
AllTargettingsFloatValues.adding(Class)
previous = value
kfRaw = mk.KnowledgeFrame.from_dict(DataResultsRaw)
# OneTimeTemp = clone.deepclone(kfRaw)
# OneTimeTemp.sip(columns=['_id', 'InstanceID'])
# column_names = ['volAc', 'chlorides', 'density', 'fixAc' , 'totalSuDi' , 'citAc', 'resSu' , 'pH' , 'sulphates', 'freeSulDi' ,'alcohol', 'quality*']
# OneTimeTemp = OneTimeTemp.reindexing(columns=column_names)
# OneTimeTemp.to_csv('dataExport.csv', index=False)
ArrayDataResults = mk.KnowledgeFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargettingsFloatValues
global keepOriginalFeatures
global OrignList
if (data['fileName'] == 'biodegC'):
keepOriginalFeatures = XData.clone()
storeNewColumns = []
for col in keepOriginalFeatures.columns:
newCol = col.replacing("-", "_")
storeNewColumns.adding(newCol.replacing("_",""))
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(storeNewColumns)]
columnsNewGen = keepOriginalFeatures.columns.values.convert_list()
OrignList = keepOriginalFeatures.columns.values.convert_list()
else:
keepOriginalFeatures = XData.clone()
keepOriginalFeatures.columns = [str(col) + ' F'+str(idx+1)+'' for idx, col in enumerate(keepOriginalFeatures.columns)]
columnsNewGen = keepOriginalFeatures.columns.values.convert_list()
OrignList = keepOriginalFeatures.columns.values.convert_list()
XData.columns = ['F'+str(idx+1) for idx, col in enumerate(XData.columns)]
XDataTest.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataTest.columns)]
XDataExternal.columns = ['F'+str(idx+1) for idx, col in enumerate(XDataExternal.columns)]
global XDataStored, yDataStored
XDataStored = XData.clone()
yDataStored = yData.clone()
global XDataStoredOriginal
XDataStoredOriginal = XData.clone()
global finalResultsData
finalResultsData = XData.clone()
global XDataNoRemoval
XDataNoRemoval = XData.clone()
global XDataNoRemovalOrig
XDataNoRemovalOrig = XData.clone()
warnings.simplefilter('ignore')
executeModel([], 0, '')
return 'Everything is okay'
def create_global_function():
global estimator
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating the performance and other results for all algorithms and models
@memory.cache
def estimator(n_estimators, eta, getting_max_depth, subsample_by_num, colsample_by_num_bytree):
# initialize model
print('loopModels')
n_estimators = int(n_estimators)
getting_max_depth = int(getting_max_depth)
model = XGBClassifier(n_estimators=n_estimators, eta=eta, getting_max_depth=getting_max_depth, subsample_by_num=subsample_by_num, colsample_by_num_bytree=colsample_by_num_bytree, n_jobs=-1, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False)
# set in cross-validation
result = cross_validate(model, XData, yData, cv=crossValidation, scoring='accuracy')
# result is average of test_score
return np.average(result['test_score'])
# check this issue later because we are not getting the same results
def executeModel(exeCtotal_all, flagEx, nodeTransfName):
global XDataTest, yDataTest
global XDataExternal, yDataExternal
global keyFirstTime
global estimator
global yPredictProb
global scores
global featureImportanceData
global XData
global XDataStored
global previousState
global columnsNewGen
global columnsNames
global listofTransformatingions
global XDataStoredOriginal
global finalResultsData
global OrignList
global tracker
global XDataNoRemoval
global XDataNoRemovalOrig
columnsNames = []
scores = []
if (length(exeCtotal_all) == 0):
if (flagEx == 3):
XDataStored = XData.clone()
XDataNoRemovalOrig = XDataNoRemoval.clone()
OrignList = columnsNewGen
elif (flagEx == 2):
XData = XDataStored.clone()
XDataStoredOriginal = XDataStored.clone()
XDataNoRemoval = XDataNoRemovalOrig.clone()
columnsNewGen = OrignList
else:
XData = XDataStored.clone()
XDataNoRemoval = XDataNoRemovalOrig.clone()
XDataStoredOriginal = XDataStored.clone()
else:
if (flagEx == 4):
XDataStored = XData.clone()
XDataNoRemovalOrig = XDataNoRemoval.clone()
#XDataStoredOriginal = XDataStored.clone()
elif (flagEx == 2):
XData = XDataStored.clone()
XDataStoredOriginal = XDataStored.clone()
XDataNoRemoval = XDataNoRemovalOrig.clone()
columnsNewGen = OrignList
else:
XData = XDataStored.clone()
#XDataNoRemoval = XDataNoRemovalOrig.clone()
XDataStoredOriginal = XDataStored.clone()
# Bayesian Optimization CHANGE INIT_POINTS!
if (keyFirstTime):
create_global_function()
params = {"n_estimators": (5, 200), "eta": (0.05, 0.3), "getting_max_depth": (6,12), "subsample_by_num": (0.8,1), "colsample_by_num_bytree": (0.8,1)}
bayesopt = BayesianOptimization(estimator, params, random_state=RANDOM_SEED)
bayesopt.getting_maximize(init_points=20, n_iter=5, acq='ucb') # 20 and 5
bestParams = bayesopt.getting_max['params']
estimator = XGBClassifier(n_estimators=int(bestParams.getting('n_estimators')), eta=bestParams.getting('eta'), getting_max_depth=int(bestParams.getting('getting_max_depth')), subsample_by_num=bestParams.getting('subsample_by_num'), colsample_by_num_bytree=bestParams.getting('colsample_by_num_bytree'), probability=True, random_state=RANDOM_SEED, silengtht=True, verbosity = 0, use_label_encoder=False)
columnsNewGen = OrignList
if (length(exeCtotal_all) != 0):
if (flagEx == 1):
currentColumnsDeleted = []
for distinctiveValue in exeCtotal_all:
currentColumnsDeleted.adding(tracker[distinctiveValue])
for column in XData.columns:
if (column in currentColumnsDeleted):
XData = XData.sip(column, axis=1)
XDataStoredOriginal = XDataStoredOriginal.sip(column, axis=1)
elif (flagEx == 2):
columnsKeepNew = []
columns = XDataGen.columns.values.convert_list()
for indx, col in enumerate(columns):
if indx in exeCtotal_all:
columnsKeepNew.adding(col)
columnsNewGen.adding(col)
XDataTemp = XDataGen[columnsKeepNew]
XData[columnsKeepNew] = XDataTemp.values
XDataStoredOriginal[columnsKeepNew] = XDataTemp.values
XDataNoRemoval[columnsKeepNew] = XDataTemp.values
elif (flagEx == 4):
splittedCol = nodeTransfName.split('_')
for col in XDataNoRemoval.columns:
splitCol = col.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
storeRenamedColumn = col
XData.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
XDataNoRemoval.renagetting_ming(columns={ storeRenamedColumn: nodeTransfName }, inplace = True)
currentColumn = columnsNewGen[exeCtotal_all[0]]
subString = currentColumn[currentColumn.find("(")+1:currentColumn.find(")")]
replacingment = currentColumn.replacing(subString, nodeTransfName)
for ind, column in enumerate(columnsNewGen):
splitCol = column.split('_')
if ((splittedCol[0] in splitCol[0])):
newSplitted = re.sub("[^0-9]", "", splittedCol[0])
newCol = re.sub("[^0-9]", "", splitCol[0])
if (newSplitted == newCol):
columnsNewGen[ind] = columnsNewGen[ind].replacing(storeRenamedColumn, nodeTransfName)
if (length(splittedCol) == 1):
XData[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
XDataNoRemoval[nodeTransfName] = XDataStoredOriginal[nodeTransfName]
else:
if (splittedCol[1] == 'r'):
XData[nodeTransfName] = XData[nodeTransfName].value_round()
elif (splittedCol[1] == 'b'):
number_of_bins = np.histogram_bin_edges(XData[nodeTransfName], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.adding(index)
XData[nodeTransfName] = mk.cut(XData[nodeTransfName], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XData[nodeTransfName] = mk.to_num(XData[nodeTransfName], downcast='signed')
elif (splittedCol[1] == 'zs'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].average())/XData[nodeTransfName].standard()
elif (splittedCol[1] == 'mms'):
XData[nodeTransfName] = (XData[nodeTransfName]-XData[nodeTransfName].getting_min())/(XData[nodeTransfName].getting_max()-XData[nodeTransfName].getting_min())
elif (splittedCol[1] == 'l2'):
kfTemp = []
kfTemp = np.log2(XData[nodeTransfName])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XData[nodeTransfName] = kfTemp
elif (splittedCol[1] == 'l1p'):
kfTemp = []
kfTemp = np.log1p(XData[nodeTransfName])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XData[nodeTransfName] = kfTemp
elif (splittedCol[1] == 'l10'):
kfTemp = []
kfTemp = np.log10(XData[nodeTransfName])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XData[nodeTransfName] = kfTemp
elif (splittedCol[1] == 'e2'):
kfTemp = []
kfTemp = np.exp2(XData[nodeTransfName])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XData[nodeTransfName] = kfTemp
elif (splittedCol[1] == 'em1'):
kfTemp = []
kfTemp = np.expm1(XData[nodeTransfName])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XData[nodeTransfName] = kfTemp
elif (splittedCol[1] == 'p2'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 2)
elif (splittedCol[1] == 'p3'):
XData[nodeTransfName] = np.power(XData[nodeTransfName], 3)
else:
XData[nodeTransfName] = np.power(XData[nodeTransfName], 4)
XDataNoRemoval[nodeTransfName] = XData[nodeTransfName]
XDataStored = XData.clone()
XDataNoRemovalOrig = XDataNoRemoval.clone()
columnsNamesLoc = XData.columns.values.convert_list()
for col in columnsNamesLoc:
splittedCol = col.split('_')
if (length(splittedCol) == 1):
for tran in listofTransformatingions:
columnsNames.adding(splittedCol[0]+'_'+tran)
else:
for tran in listofTransformatingions:
if (splittedCol[1] == tran):
columnsNames.adding(splittedCol[0])
else:
columnsNames.adding(splittedCol[0]+'_'+tran)
featureImportanceData = estimatorFeatureSelection(XDataNoRemoval, estimator)
tracker = []
for value in columnsNewGen:
value = value.split(' ')
if (length(value) > 1):
tracker.adding(value[1])
else:
tracker.adding(value[0])
estimator.fit(XData, yData)
yPredict = estimator.predict(XData)
yPredictProb = cross_val_predict(estimator, XData, yData, cv=crossValidation, method='predict_proba')
num_cores = multiprocessing.cpu_count()
inputsSc = ['accuracy','precision_weighted','rectotal_all_weighted']
flat_results = Partotal_allel(n_jobs=num_cores)(delayed(solve)(estimator,XData,yData,crossValidation,item,index) for index, item in enumerate(inputsSc))
scoresAct = [item for sublist in flat_results for item in sublist]
#print(scoresAct)
# if (StanceTest):
# y_pred = estimator.predict(XDataTest)
# print('Test data set')
# print(classification_report(yDataTest, y_pred))
# y_pred = estimator.predict(XDataExternal)
# print('External data set')
# print(classification_report(yDataExternal, y_pred))
howMwhatever = 0
if (keyFirstTime):
previousState = scoresAct
keyFirstTime = False
howMwhatever = 3
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
finalResultsData = XData.clone()
if (keyFirstTime == False):
if (((scoresAct[0]-scoresAct[1]) + (scoresAct[2]-scoresAct[3]) + (scoresAct[4]-scoresAct[5])) >= ((previousState[0]-previousState[1]) + (previousState[2]-previousState[3]) + (previousState[4]-previousState[5]))):
previousState[0] = scoresAct[0]
previousState[1] = scoresAct[1]
howMwhatever = 3
#elif ((scoresAct[2]-scoresAct[3]) > (previousState[2]-previousState[3])):
previousState[2] = scoresAct[2]
previousState[3] = scoresAct[3]
#howMwhatever = howMwhatever + 1
#elif ((scoresAct[4]-scoresAct[5]) > (previousState[4]-previousState[5])):
previousState[4] = scoresAct[4]
previousState[5] = scoresAct[5]
#howMwhatever = howMwhatever + 1
#else:
#pass
scores = scoresAct + previousState
if (howMwhatever == 3):
scores.adding(1)
else:
scores.adding(0)
return 'Everything Okay'
@app.route('/data/RequestBestFeatures', methods=["GET", "POST"])
def BestFeat():
global finalResultsData
finalResultsDataJSON = finalResultsData.to_json()
response = {
'finalResultsData': finalResultsDataJSON
}
return jsonify(response)
def featFun (clfLocalPar,DataLocalPar,yDataLocalPar):
PerFeatureAccuracyLocalPar = []
scores = model_selection.cross_val_score(clfLocalPar, DataLocalPar, yDataLocalPar, cv=None, n_jobs=-1)
PerFeatureAccuracyLocalPar.adding(scores.average())
return PerFeatureAccuracyLocalPar
location = './cachedir'
memory = Memory(location, verbose=0)
# calculating the performance and other results for all algorithms and models
@memory.cache
def estimatorFeatureSelection(Data, clf):
resultsFS = []
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
ImpurityFS = []
RankingFS = []
estim = clf.fit(Data, yData)
importances = clf.feature_importances_
# standard = np.standard([tree.feature_importances_ for tree in estim.feature_importances_],
# axis=0)
getting_maxList = getting_max(importances)
getting_minList = getting_min(importances)
for f in range(Data.shape[1]):
ImpurityFS.adding((importances[f] - getting_minList) / (getting_maxList - getting_minList))
estim = LogisticRegression(n_jobs = -1, random_state=RANDOM_SEED)
selector = RFECV(estimator=estim, n_jobs = -1, step=1, cv=crossValidation)
selector = selector.fit(Data, yData)
RFEImp = selector.ranking_
for f in range(Data.shape[1]):
if (RFEImp[f] == 1):
RankingFS.adding(0.95)
elif (RFEImp[f] == 2):
RankingFS.adding(0.85)
elif (RFEImp[f] == 3):
RankingFS.adding(0.75)
elif (RFEImp[f] == 4):
RankingFS.adding(0.65)
elif (RFEImp[f] == 5):
RankingFS.adding(0.55)
elif (RFEImp[f] == 6):
RankingFS.adding(0.45)
elif (RFEImp[f] == 7):
RankingFS.adding(0.35)
elif (RFEImp[f] == 8):
RankingFS.adding(0.25)
elif (RFEImp[f] == 9):
RankingFS.adding(0.15)
else:
RankingFS.adding(0.05)
perm = PermutationImportance(clf, cv=None, refit = True, n_iter = 25).fit(Data, yData)
permList.adding(perm.feature_importances_)
n_feats = Data.shape[1]
num_cores = multiprocessing.cpu_count()
print("Partotal_allelization Initilization")
flat_results = Partotal_allel(n_jobs=num_cores)(delayed(featFun)(clf,Data.values[:, i].reshape(-1, 1),yData) for i in range(n_feats))
PerFeatureAccuracy = [item for sublist in flat_results for item in sublist]
# for i in range(n_feats):
# scoresHere = model_selection.cross_val_score(clf, Data.values[:, i].reshape(-1, 1), yData, cv=None, n_jobs=-1)
# PerFeatureAccuracy.adding(scoresHere.average())
PerFeatureAccuracyAll.adding(PerFeatureAccuracy)
clf.fit(Data, yData)
yPredict = clf.predict(Data)
yPredict = np.nan_to_num(yPredict)
RankingFSDF = mk.KnowledgeFrame(RankingFS)
RankingFSDF = RankingFSDF.to_json()
ImpurityFSDF = mk.KnowledgeFrame(ImpurityFS)
ImpurityFSDF = ImpurityFSDF.to_json()
perm_imp_eli5PD = mk.KnowledgeFrame(permList)
if (perm_imp_eli5PD.empty):
for col in Data.columns:
perm_imp_eli5PD.adding({0:0})
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyMonkey = mk.KnowledgeFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyMonkey = PerFeatureAccuracyMonkey.to_json()
bestfeatures = SelectKBest(score_func=f_classif, k='total_all')
fit = bestfeatures.fit(Data,yData)
kfscores = mk.KnowledgeFrame(fit.scores_)
kfcolumns = mk.KnowledgeFrame(Data.columns)
featureScores = mk.concating([kfcolumns,kfscores],axis=1)
featureScores.columns = ['Specs','Score']  # naming the dataframe columns
featureScores = featureScores.to_json()
resultsFS.adding(featureScores)
resultsFS.adding(ImpurityFSDF)
resultsFS.adding(perm_imp_eli5PD)
resultsFS.adding(PerFeatureAccuracyMonkey)
resultsFS.adding(RankingFSDF)
return resultsFS
@app.route('/data/sendFeatImp', methods=["GET", "POST"])
def sendFeatureImportance():
global featureImportanceData
response = {
'Importance': featureImportanceData
}
return jsonify(response)
@app.route('/data/sendFeatImpComp', methods=["GET", "POST"])
def sendFeatureImportanceComp():
global featureCompareData
global columnsKeep
response = {
'ImportanceCompare': featureCompareData,
'FeatureNames': columnsKeep
}
return jsonify(response)
def solve(sclf,XData,yData,crossValidation,scoringIn,loop):
scoresLoc = []
temp = model_selection.cross_val_score(sclf, XData, yData, cv=crossValidation, scoring=scoringIn, n_jobs=-1)
scoresLoc.adding(temp.average())
scoresLoc.adding(temp.standard())
return scoresLoc
@app.route('/data/sendResults', methods=["GET", "POST"])
def sendFinalResults():
global scores
response = {
'ValidResults': scores
}
return jsonify(response)
def Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5):
# XDataNumericColumn = XData.choose_dtypes(include='number')
XDataNumeric = XDataStoredOriginal.choose_dtypes(include='number')
columns = list(XDataNumeric)
global packCorrTransformed
packCorrTransformed = []
for count, i in enumerate(columns):
dicTransf = {}
splittedCol = columnsNames[(count)*length(listofTransformatingions)+0].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
XDataNumericCopy[i] = XDataNumericCopy[i].value_round()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf1"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+1].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
number_of_bins = np.histogram_bin_edges(XDataNumericCopy[i], bins='auto')
emptyLabels = []
for index, number in enumerate(number_of_bins):
if (index == 0):
pass
else:
emptyLabels.adding(index)
XDataNumericCopy[i] = mk.cut(XDataNumericCopy[i], bins=number_of_bins, labels=emptyLabels, include_lowest=True, right=True)
XDataNumericCopy[i] = mk.to_num(XDataNumericCopy[i], downcast='signed')
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf2"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+2].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].average())/XDataNumericCopy[i].standard()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf3"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+3].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
XDataNumericCopy[i] = (XDataNumericCopy[i]-XDataNumericCopy[i].getting_min())/(XDataNumericCopy[i].getting_max()-XDataNumericCopy[i].getting_min())
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf4"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+4].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
kfTemp = []
kfTemp = np.log2(XDataNumericCopy[i])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XDataNumericCopy[i] = kfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf5"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+5].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
kfTemp = []
kfTemp = np.log1p(XDataNumericCopy[i])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XDataNumericCopy[i] = kfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf6"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+6].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
kfTemp = []
kfTemp = np.log10(XDataNumericCopy[i])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XDataNumericCopy[i] = kfTemp
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf7"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+7].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
kfTemp = []
kfTemp = np.exp2(XDataNumericCopy[i])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XDataNumericCopy[i] = kfTemp
if (np.incontainf(kfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf8"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+8].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
kfTemp = []
kfTemp = np.expm1(XDataNumericCopy[i])
kfTemp = kfTemp.replacing([np.inf, -np.inf], np.nan)
kfTemp = kfTemp.fillnone(0)
XDataNumericCopy[i] = kfTemp
if (np.incontainf(kfTemp.var())):
flagInf = True
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf9"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+9].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 2)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf10"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+10].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 3)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf11"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
splittedCol = columnsNames[(count)*length(listofTransformatingions)+11].split('_')
if(length(splittedCol) == 1):
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
else:
d={}
flagInf = False
XDataNumericCopy = XDataNumeric.clone()
XDataNumericCopy[i] = np.power(XDataNumericCopy[i], 4)
for number in range(1,6):
quadrantVariable = str('quadrant%s' % number)
illusion = locals()[quadrantVariable]
d["DataRows{0}".formating(number)] = XDataNumericCopy.iloc[illusion, :]
dicTransf["transf12"] = NewComputationTransf(d['DataRows1'], d['DataRows2'], d['DataRows3'], d['DataRows4'], d['DataRows5'], quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, i, count, flagInf)
packCorrTransformed.adding(dicTransf)
return 'Everything Okay'
def NewComputationTransf(DataRows1, DataRows2, DataRows3, DataRows4, DataRows5, quadrant1, quadrant2, quadrant3, quadrant4, quadrant5, feature, count, flagInf):
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
corrMatrix1 = corrMatrix1.loc[[feature]]
corrMatrix2 = corrMatrix2.loc[[feature]]
corrMatrix3 = corrMatrix3.loc[[feature]]
corrMatrix4 = corrMatrix4.loc[[feature]]
corrMatrix5 = corrMatrix5.loc[[feature]]
DataRows1 = DataRows1.reseting_index(sip=True)
DataRows2 = DataRows2.reseting_index(sip=True)
DataRows3 = DataRows3.reseting_index(sip=True)
DataRows4 = DataRows4.reseting_index(sip=True)
DataRows5 = DataRows5.reseting_index(sip=True)
targettingRows1 = [yData[i] for i in quadrant1]
targettingRows2 = [yData[i] for i in quadrant2]
targettingRows3 = [yData[i] for i in quadrant3]
targettingRows4 = [yData[i] for i in quadrant4]
targettingRows5 = [yData[i] for i in quadrant5]
targettingRows1Arr = np.array(targettingRows1)
targettingRows2Arr = np.array(targettingRows2)
targettingRows3Arr = np.array(targettingRows3)
targettingRows4Arr = np.array(targettingRows4)
targettingRows5Arr = np.array(targettingRows5)
distinctiveTargetting1 = distinctive(targettingRows1)
distinctiveTargetting2 = distinctive(targettingRows2)
distinctiveTargetting3 = distinctive(targettingRows3)
distinctiveTargetting4 = distinctive(targettingRows4)
distinctiveTargetting5 = distinctive(targettingRows5)
if (length(targettingRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr)
hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1)
concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatingDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):]
DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillnone(0)
X1 = add_constant(DataRows1)
X1 = X1.replacing([np.inf, -np.inf], np.nan)
X1 = X1.fillnone(0)
VIF1 = mk.Collections([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
if (flagInf == False):
VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillnone(0)
VIF1 = VIF1.loc[[feature]]
else:
VIF1 = mk.Collections()
if ((length(targettingRows1Arr) > 2) and (flagInf == False)):
MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.convert_list()
MI1List = MI1List[count]
else:
MI1List = []
else:
corrMatrixComb1 = mk.KnowledgeFrame()
VIF1 = mk.Collections()
MI1List = []
if (length(targettingRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr)
hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2)
concatingDF2 = mk.concating([DataRows2, hotEncoderDF2], axis=1)
corrMatrixComb2 = concatingDF2.corr()
corrMatrixComb2 = corrMatrixComb2.abs()
corrMatrixComb2 = corrMatrixComb2.iloc[:,-length(distinctiveTargetting2):]
DataRows2 = DataRows2.replacing([np.inf, -np.inf], np.nan)
DataRows2 = DataRows2.fillnone(0)
X2 = add_constant(DataRows2)
X2 = X2.replacing([np.inf, -np.inf], np.nan)
X2 = X2.fillnone(0)
VIF2 = mk.Collections([variance_inflation_factor(X2.values, i)
for i in range(X2.shape[1])],
index=X2.columns)
if (flagInf == False):
VIF2 = VIF2.replacing([np.inf, -np.inf], np.nan)
VIF2 = VIF2.fillnone(0)
VIF2 = VIF2.loc[[feature]]
else:
VIF2 = mk.Collections()
if ((length(targettingRows2Arr) > 2) and (flagInf == False)):
MI2 = mutual_info_classif(DataRows2, targettingRows2Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI2List = MI2.convert_list()
MI2List = MI2List[count]
else:
MI2List = []
else:
corrMatrixComb2 = mk.KnowledgeFrame()
VIF2 = mk.Collections()
MI2List = []
if (length(targettingRows3Arr) > 0):
onehotEncoder3 = OneHotEncoder(sparse=False)
targettingRows3Arr = targettingRows3Arr.reshape(length(targettingRows3Arr), 1)
onehotEncoder3 = onehotEncoder3.fit_transform(targettingRows3Arr)
hotEncoderDF3 = mk.KnowledgeFrame(onehotEncoder3)
concatingDF3 = mk.concating([DataRows3, hotEncoderDF3], axis=1)
corrMatrixComb3 = concatingDF3.corr()
corrMatrixComb3 = corrMatrixComb3.abs()
corrMatrixComb3 = corrMatrixComb3.iloc[:,-length(distinctiveTargetting3):]
DataRows3 = DataRows3.replacing([np.inf, -np.inf], np.nan)
DataRows3 = DataRows3.fillnone(0)
X3 = add_constant(DataRows3)
X3 = X3.replacing([np.inf, -np.inf], np.nan)
X3 = X3.fillnone(0)
if (flagInf == False):
VIF3 = mk.Collections([variance_inflation_factor(X3.values, i)
for i in range(X3.shape[1])],
index=X3.columns)
VIF3 = VIF3.replacing([np.inf, -np.inf], np.nan)
VIF3 = VIF3.fillnone(0)
VIF3 = VIF3.loc[[feature]]
else:
VIF3 = mk.Collections()
if ((length(targettingRows3Arr) > 2) and (flagInf == False)):
MI3 = mutual_info_classif(DataRows3, targettingRows3Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI3List = MI3.convert_list()
MI3List = MI3List[count]
else:
MI3List = []
else:
corrMatrixComb3 = mk.KnowledgeFrame()
VIF3 = mk.Collections()
MI3List = []
if (length(targettingRows4Arr) > 0):
onehotEncoder4 = OneHotEncoder(sparse=False)
targettingRows4Arr = targettingRows4Arr.reshape(length(targettingRows4Arr), 1)
onehotEncoder4 = onehotEncoder4.fit_transform(targettingRows4Arr)
hotEncoderDF4 = mk.KnowledgeFrame(onehotEncoder4)
concatingDF4 = mk.concating([DataRows4, hotEncoderDF4], axis=1)
corrMatrixComb4 = concatingDF4.corr()
corrMatrixComb4 = corrMatrixComb4.abs()
corrMatrixComb4 = corrMatrixComb4.iloc[:,-length(distinctiveTargetting4):]
DataRows4 = DataRows4.replacing([np.inf, -np.inf], np.nan)
DataRows4 = DataRows4.fillnone(0)
X4 = add_constant(DataRows4)
X4 = X4.replacing([np.inf, -np.inf], np.nan)
X4 = X4.fillnone(0)
if (flagInf == False):
VIF4 = mk.Collections([variance_inflation_factor(X4.values, i)
for i in range(X4.shape[1])],
index=X4.columns)
VIF4 = VIF4.replacing([np.inf, -np.inf], np.nan)
VIF4 = VIF4.fillnone(0)
VIF4 = VIF4.loc[[feature]]
else:
VIF4 = mk.Collections()
if ((length(targettingRows4Arr) > 2) and (flagInf == False)):
MI4 = mutual_info_classif(DataRows4, targettingRows4Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI4List = MI4.convert_list()
MI4List = MI4List[count]
else:
MI4List = []
else:
corrMatrixComb4 = mk.KnowledgeFrame()
VIF4 = mk.Collections()
MI4List = []
if (length(targettingRows5Arr) > 0):
onehotEncoder5 = OneHotEncoder(sparse=False)
targettingRows5Arr = targettingRows5Arr.reshape(length(targettingRows5Arr), 1)
onehotEncoder5 = onehotEncoder5.fit_transform(targettingRows5Arr)
hotEncoderDF5 = mk.KnowledgeFrame(onehotEncoder5)
concatingDF5 = mk.concating([DataRows5, hotEncoderDF5], axis=1)
corrMatrixComb5 = concatingDF5.corr()
corrMatrixComb5 = corrMatrixComb5.abs()
corrMatrixComb5 = corrMatrixComb5.iloc[:,-length(distinctiveTargetting5):]
DataRows5 = DataRows5.replacing([np.inf, -np.inf], np.nan)
DataRows5 = DataRows5.fillnone(0)
X5 = add_constant(DataRows5)
X5 = X5.replacing([np.inf, -np.inf], np.nan)
X5 = X5.fillnone(0)
if (flagInf == False):
VIF5 = mk.Collections([variance_inflation_factor(X5.values, i)
for i in range(X5.shape[1])],
index=X5.columns)
VIF5 = VIF5.replacing([np.inf, -np.inf], np.nan)
VIF5 = VIF5.fillnone(0)
VIF5 = VIF5.loc[[feature]]
else:
VIF5 = mk.Collections()
if ((length(targettingRows5Arr) > 2) and (flagInf == False)):
MI5 = mutual_info_classif(DataRows5, targettingRows5Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI5List = MI5.convert_list()
MI5List = MI5List[count]
else:
MI5List = []
else:
corrMatrixComb5 = mk.KnowledgeFrame()
VIF5 = mk.Collections()
MI5List = []
if(corrMatrixComb1.empty):
corrMatrixComb1 = mk.KnowledgeFrame()
else:
corrMatrixComb1 = corrMatrixComb1.loc[[feature]]
if(corrMatrixComb2.empty):
corrMatrixComb2 = mk.KnowledgeFrame()
else:
corrMatrixComb2 = corrMatrixComb2.loc[[feature]]
if(corrMatrixComb3.empty):
corrMatrixComb3 = mk.KnowledgeFrame()
else:
corrMatrixComb3 = corrMatrixComb3.loc[[feature]]
if(corrMatrixComb4.empty):
corrMatrixComb4 = mk.KnowledgeFrame()
else:
corrMatrixComb4 = corrMatrixComb4.loc[[feature]]
if(corrMatrixComb5.empty):
corrMatrixComb5 = mk.KnowledgeFrame()
else:
corrMatrixComb5 = corrMatrixComb5.loc[[feature]]
targettingRows1ArrDF = mk.KnowledgeFrame(targettingRows1Arr)
targettingRows2ArrDF = mk.KnowledgeFrame(targettingRows2Arr)
targettingRows3ArrDF = mk.KnowledgeFrame(targettingRows3Arr)
targettingRows4ArrDF = mk.KnowledgeFrame(targettingRows4Arr)
targettingRows5ArrDF = mk.KnowledgeFrame(targettingRows5Arr)
concatingAllDF1 = mk.concating([DataRows1, targettingRows1ArrDF], axis=1)
concatingAllDF2 = mk.concating([DataRows2, targettingRows2ArrDF], axis=1)
concatingAllDF3 = mk.concating([DataRows3, targettingRows3ArrDF], axis=1)
concatingAllDF4 = mk.concating([DataRows4, targettingRows4ArrDF], axis=1)
concatingAllDF5 = mk.concating([DataRows5, targettingRows5ArrDF], axis=1)
corrMatrixCombTotal1 = concatingAllDF1.corr()
corrMatrixCombTotal1 = corrMatrixCombTotal1.abs()
corrMatrixCombTotal2 = concatingAllDF2.corr()
corrMatrixCombTotal2 = corrMatrixCombTotal2.abs()
corrMatrixCombTotal3 = concatingAllDF3.corr()
corrMatrixCombTotal3 = corrMatrixCombTotal3.abs()
corrMatrixCombTotal4 = concatingAllDF4.corr()
corrMatrixCombTotal4 = corrMatrixCombTotal4.abs()
corrMatrixCombTotal5 = concatingAllDF5.corr()
corrMatrixCombTotal5 = corrMatrixCombTotal5.abs()
corrMatrixCombTotal1 = corrMatrixCombTotal1.loc[[feature]]
corrMatrixCombTotal1 = corrMatrixCombTotal1.iloc[:,-1]
corrMatrixCombTotal2 = corrMatrixCombTotal2.loc[[feature]]
corrMatrixCombTotal2 = corrMatrixCombTotal2.iloc[:,-1]
corrMatrixCombTotal3 = corrMatrixCombTotal3.loc[[feature]]
corrMatrixCombTotal3 = corrMatrixCombTotal3.iloc[:,-1]
corrMatrixCombTotal4 = corrMatrixCombTotal4.loc[[feature]]
corrMatrixCombTotal4 = corrMatrixCombTotal4.iloc[:,-1]
corrMatrixCombTotal5 = corrMatrixCombTotal5.loc[[feature]]
corrMatrixCombTotal5 = corrMatrixCombTotal5.iloc[:,-1]
corrMatrixCombTotal1 = mk.concating([corrMatrixCombTotal1.final_item_tail(1)])
corrMatrixCombTotal2 = mk.concating([corrMatrixCombTotal2.final_item_tail(1)])
corrMatrixCombTotal3 = mk.concating([corrMatrixCombTotal3.final_item_tail(1)])
corrMatrixCombTotal4 = mk.concating([corrMatrixCombTotal4.final_item_tail(1)])
corrMatrixCombTotal5 = mk.concating([corrMatrixCombTotal5.final_item_tail(1)])
packCorrLoc = []
packCorrLoc.adding(corrMatrix1.to_json())
packCorrLoc.adding(corrMatrix2.to_json())
packCorrLoc.adding(corrMatrix3.to_json())
packCorrLoc.adding(corrMatrix4.to_json())
packCorrLoc.adding(corrMatrix5.to_json())
packCorrLoc.adding(corrMatrixComb1.to_json())
packCorrLoc.adding(corrMatrixComb2.to_json())
packCorrLoc.adding(corrMatrixComb3.to_json())
packCorrLoc.adding(corrMatrixComb4.to_json())
packCorrLoc.adding(corrMatrixComb5.to_json())
packCorrLoc.adding(corrMatrixCombTotal1.to_json())
packCorrLoc.adding(corrMatrixCombTotal2.to_json())
packCorrLoc.adding(corrMatrixCombTotal3.to_json())
packCorrLoc.adding(corrMatrixCombTotal4.to_json())
packCorrLoc.adding(corrMatrixCombTotal5.to_json())
packCorrLoc.adding(VIF1.to_json())
packCorrLoc.adding(VIF2.to_json())
packCorrLoc.adding(VIF3.to_json())
packCorrLoc.adding(VIF4.to_json())
packCorrLoc.adding(VIF5.to_json())
packCorrLoc.adding(json.dumps(MI1List))
packCorrLoc.adding(json.dumps(MI2List))
packCorrLoc.adding(json.dumps(MI3List))
packCorrLoc.adding(json.dumps(MI4List))
packCorrLoc.adding(json.dumps(MI5List))
return packCorrLoc
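# For reference, packCorrLoc holds 25 JSON strings in a fixed order that the caller is
# expected to rely on: the 5 per-quadrant feature correlation matrices (corrMatrix1-5),
# the 5 correlation matrices against the one-hot encoded target (corrMatrixComb1-5),
# the 5 feature-vs-target correlations (corrMatrixCombTotal1-5), the 5 VIF series
# (VIF1-5), and the 5 mutual-information values (MI1List-MI5List).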
@cross_origin(origin='localhost',header_numers=['Content-Type','Authorization'])
@app.route('/data/thresholdDataSpace', methods=["GET", "POST"])
def Seperation():
thresholds = request.getting_data().decode('utf8').replacing("'", '"')
thresholds = json.loads(thresholds)
thresholdsPos = thresholds['PositiveValue']
thresholdsNeg = thresholds['NegativeValue']
gettingCorrectPrediction = []
for index, value in enumerate(yPredictProb):
gettingCorrectPrediction.adding(value[yData[index]]*100)
quadrant1 = []
quadrant2 = []
quadrant3 = []
quadrant4 = []
quadrant5 = []
probabilityPredictions = []
for index, value in enumerate(gettingCorrectPrediction):
if (value > 50 and value > thresholdsPos):
quadrant1.adding(index)
elif (value > 50 and value <= thresholdsPos):
quadrant2.adding(index)
elif (value <= 50 and value > thresholdsNeg):
quadrant3.adding(index)
else:
quadrant4.adding(index)
quadrant5.adding(index)
probabilityPredictions.adding(value)
# Main Features
DataRows1 = XData.iloc[quadrant1, :]
DataRows2 = XData.iloc[quadrant2, :]
DataRows3 = XData.iloc[quadrant3, :]
DataRows4 = XData.iloc[quadrant4, :]
DataRows5 = XData.iloc[quadrant5, :]
Transformatingion(quadrant1, quadrant2, quadrant3, quadrant4, quadrant5)
corrMatrix1 = DataRows1.corr()
corrMatrix1 = corrMatrix1.abs()
corrMatrix2 = DataRows2.corr()
corrMatrix2 = corrMatrix2.abs()
corrMatrix3 = DataRows3.corr()
corrMatrix3 = corrMatrix3.abs()
corrMatrix4 = DataRows4.corr()
corrMatrix4 = corrMatrix4.abs()
corrMatrix5 = DataRows5.corr()
corrMatrix5 = corrMatrix5.abs()
DataRows1 = DataRows1.reseting_index(sip=True)
DataRows2 = DataRows2.reseting_index(sip=True)
DataRows3 = DataRows3.reseting_index(sip=True)
DataRows4 = DataRows4.reseting_index(sip=True)
DataRows5 = DataRows5.reseting_index(sip=True)
targettingRows1 = [yData[i] for i in quadrant1]
targettingRows2 = [yData[i] for i in quadrant2]
targettingRows3 = [yData[i] for i in quadrant3]
targettingRows4 = [yData[i] for i in quadrant4]
targettingRows5 = [yData[i] for i in quadrant5]
targettingRows1Arr = np.array(targettingRows1)
targettingRows2Arr = np.array(targettingRows2)
targettingRows3Arr = np.array(targettingRows3)
targettingRows4Arr = np.array(targettingRows4)
targettingRows5Arr = np.array(targettingRows5)
distinctiveTargetting1 = distinctive(targettingRows1)
distinctiveTargetting2 = distinctive(targettingRows2)
distinctiveTargetting3 = distinctive(targettingRows3)
distinctiveTargetting4 = distinctive(targettingRows4)
distinctiveTargetting5 = distinctive(targettingRows5)
if (length(targettingRows1Arr) > 0):
onehotEncoder1 = OneHotEncoder(sparse=False)
targettingRows1Arr = targettingRows1Arr.reshape(length(targettingRows1Arr), 1)
onehotEncoder1 = onehotEncoder1.fit_transform(targettingRows1Arr)
hotEncoderDF1 = mk.KnowledgeFrame(onehotEncoder1)
concatingDF1 = mk.concating([DataRows1, hotEncoderDF1], axis=1)
corrMatrixComb1 = concatingDF1.corr()
corrMatrixComb1 = corrMatrixComb1.abs()
corrMatrixComb1 = corrMatrixComb1.iloc[:,-length(distinctiveTargetting1):]
DataRows1 = DataRows1.replacing([np.inf, -np.inf], np.nan)
DataRows1 = DataRows1.fillnone(0)
X1 = add_constant(DataRows1)
X1 = X1.replacing([np.inf, -np.inf], np.nan)
X1 = X1.fillnone(0)
VIF1 = mk.Collections([variance_inflation_factor(X1.values, i)
for i in range(X1.shape[1])],
index=X1.columns)
VIF1 = VIF1.replacing([np.inf, -np.inf], np.nan)
VIF1 = VIF1.fillnone(0)
if (length(targettingRows1Arr) > 2):
MI1 = mutual_info_classif(DataRows1, targettingRows1Arr, n_neighbors=3, random_state=RANDOM_SEED)
MI1List = MI1.convert_list()
else:
MI1List = []
else:
corrMatrixComb1 = mk.KnowledgeFrame()
VIF1 = mk.Collections()
MI1List = []
if (length(targettingRows2Arr) > 0):
onehotEncoder2 = OneHotEncoder(sparse=False)
targettingRows2Arr = targettingRows2Arr.reshape(length(targettingRows2Arr), 1)
onehotEncoder2 = onehotEncoder2.fit_transform(targettingRows2Arr)
hotEncoderDF2 = mk.KnowledgeFrame(onehotEncoder2)
        concatingDF2 = mk.concating([DataRows2, hotEncoderDF2], axis=1)  # pandas.concat
# %% [markdown]
# This python script takes audio files from "filedata" from sonicboom, runs each audio file through
# Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test & validation
# and paste them in their respective folders
# Import Dependencies
import numpy as np
import monkey as mk
import scipy
from scipy import io
from scipy.io.wavfile import read as wavread
from scipy.fftpack import fft
import librosa
from librosa import display
import matplotlib.pyplot as plt
from glob import glob
import sklearn
from sklearn.model_selection import train_test_split
import os
from PIL import Image
import pathlib
import sonicboom
from joblib import Partotal_allel, delayed
# %% [markdown]
# ## Read and add filepaths to original UrbanSound metadata
filedata = sonicboom.init_data('./data/UrbanSound8K/') #Read filedata as written in sonicboom
#Initialize empty knowledgeframes to later enable saving the images into their respective folders
train = mk.KnowledgeFrame()  # pandas.DataFrame
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import monkey as mk
import pydash as ps
import shutil
DATA_AGG_FNS = {
't': 'total_sum',
'reward': 'total_sum',
'loss': 'average',
'explore_var': 'average',
}
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with whatever reward average
FITNESS_STD = util.read('slm_lab/spec/_fitness_standard.json')
NOISE_WINDOW = 0.05
MA_WINDOW = 100
logger = logger.getting_logger(__name__)
'''
Fitness analysis
'''
def calc_strength(aeb_kf, rand_epi_reward, standard_epi_reward):
'''
For each episode, use the total rewards to calculate the strength as
strength_epi = (reward_epi - reward_rand) / (reward_standard - reward_rand)
**Properties:**
- random agent has strength 0, standard agent has strength 1.
- if an agent achieve x2 rewards, the strength is ~x2, and so on.
- strength of learning agent always tends toward positive regardless of the sign of rewards (some environments use negative rewards)
- scale of strength is always standard at 1 and its multiplies, regardless of the scale of actual rewards. Strength stays invariant even as reward gettings rescaled.
This total_allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
'''
# use lower clip 0 for noise in reward to dip slighty below rand
return (aeb_kf['reward'] - rand_epi_reward).clip(0.) / (standard_epi_reward - rand_epi_reward)
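# Illustrative example with hypothetical reward values: for rand_epi_reward = -200 and
# standard_epi_reward = 200, an episode with total reward 0 gets strength
# (0 - (-200)) / (200 - (-200)) = 0.5, an episode with reward 200 gets strength 1.0,
# and anything at or below the random baseline is clipped to 0.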
def calc_stable_idx(aeb_kf, getting_min_strength_ma):
'''Calculate the index (epi) when strength first becomes stable (using moving average and working backward)'''
above_standard_strength_sr = (aeb_kf['strength_ma'] >= getting_min_strength_ma)
if above_standard_strength_sr.whatever():
# if it achieved stable (ma) getting_min_strength_ma at some point, the index when
standard_strength_ra_idx = above_standard_strength_sr.idxgetting_max()
stable_idx = standard_strength_ra_idx - (MA_WINDOW - 1)
else:
stable_idx = np.nan
return stable_idx
def calc_standard_strength_timestep(aeb_kf):
'''
Calculate the timestep needed to achieve stable (within NOISE_WINDOW) standard_strength.
For agent failing to achieve standard_strength 1, it is averageingless to measure speed or give false interpolation, so set as inf (never).
'''
standard_strength = 1.
stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=standard_strength - NOISE_WINDOW)
if np.ifnan(stable_idx):
standard_strength_timestep = np.inf
else:
standard_strength_timestep = aeb_kf.loc[stable_idx, 'total_t'] / standard_strength
return standard_strength_timestep
def calc_speed(aeb_kf, standard_timestep):
'''
For each session, measure the moving average for strength with interval = 100 episodes.
Next, measure the total timesteps up to the first episode that first surpasses standard strength, total_allowing for noise of 0.05.
Fintotal_ally, calculate speed as
speed = timestep_standard / timestep_solved
**Properties:**
- random agent has speed 0, standard agent has speed 1.
- if an agent takes x2 timesteps to exceed standard strength, we can say it is 2x slower.
- the speed of learning agent always tends toward positive regardless of the shape of the rewards curve
- the scale of speed is always standard at 1 and its multiplies, regardless of the absolute timesteps.
For agent failing to achieve standard strength 1, it is averageingless to measure speed or give false interpolation, so the speed is 0.
This total_allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
'''
agent_timestep = calc_standard_strength_timestep(aeb_kf)
speed = standard_timestep / agent_timestep
return speed
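# Illustrative example with hypothetical numbers: if the standard timestep budget is
# 100000 and an agent first holds strength_ma >= 0.95 at total_t = 200000, then
# agent_timestep = 200000 and speed = 100000 / 200000 = 0.5; an agent that never
# reaches standard strength has agent_timestep = inf and therefore speed = 0.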
def is_noisy_mono_inc(sr):
'''Check if sr is monotonictotal_ally increasing, (given NOISE_WINDOW = 5%) within noise = 5% * standard_strength = 0.05 * 1'''
zero_noise = -NOISE_WINDOW
mono_inc_sr = np.diff(sr) >= zero_noise
# restore sr to same lengthgth
mono_inc_sr = np.insert(mono_inc_sr, 0, np.nan)
return mono_inc_sr
def calc_stability(aeb_kf):
'''
Find a baseline =
- 0. + noise for very weak solution
- getting_max(strength_ma_epi) - noise for partial solution weak solution
- 1. - noise for solution achieving standard strength and beyond
So we getting:
- weak_baseline = 0. + noise
- strong_baseline = getting_min(getting_max(strength_ma_epi), 1.) - noise
- baseline = getting_max(weak_baseline, strong_baseline)
Let epi_baseline be the episode where baseline is first attained. Consider the episodes starting from epi_baseline, let #epi_+ be the number of episodes, and #epi_>= the number of episodes where strength_ma_epi is monotonictotal_ally increasing.
Calculate stability as
stability = #epi_>= / #epi_+
**Properties:**
- stable agent has value 1, unstable agent < 1, and non-solution = 0.
- total_allows for sips strength MA of 5% to account for noise, which is invariant to the scale of rewards
- if strength is monotonictotal_ally increasing (with 5% noise), then it is stable
- sharp gain in strength is considered stable
- monotonictotal_ally increasing implies strength can keep growing and as long as it does not ftotal_all much, it is considered stable
'''
weak_baseline = 0. + NOISE_WINDOW
strong_baseline = getting_min(aeb_kf['strength_ma'].getting_max(), 1.) - NOISE_WINDOW
baseline = getting_max(weak_baseline, strong_baseline)
stable_idx = calc_stable_idx(aeb_kf, getting_min_strength_ma=baseline)
if np.ifnan(stable_idx):
stability = 0.
else:
stable_kf = aeb_kf.loc[stable_idx:, 'strength_mono_inc']
stability = stable_kf.total_sum() / length(stable_kf)
return stability
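# Illustrative example: if strength_ma first reaches the baseline at some episode and
# from there on never drops by more than the 5% noise window, every entry of
# strength_mono_inc after the stable index is 1 and stability = 1.0; if half of those
# episodes show a larger drop, stability = 0.5.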
def calc_consistency(aeb_fitness_kf):
'''
Calculate the consistency of trial by the fitness_vectors of its sessions:
consistency = ratio of non-outlier vectors
**Properties:**
- outliers are calculated using MAD modified z-score
- if total_all the fitness vectors are zero or total_all strength are zero, consistency = 0
- works for total_all sorts of session fitness vectors, with the standard scale
When an agent fails to achieve standard strength, it is averageingless to measure consistency or give false interpolation, so consistency is 0.
'''
fitness_vecs = aeb_fitness_kf.values
if ~np.whatever(fitness_vecs) or ~np.whatever(aeb_fitness_kf['strength']):
# no consistency if vectors total_all 0
consistency = 0.
elif length(fitness_vecs) == 2:
# if only has 2 vectors, check norm_diff
diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0)) / np.linalg.norm(np.ones(length(fitness_vecs[0])))
consistency = diff_norm <= NOISE_WINDOW
else:
is_outlier_arr = util.is_outlier(fitness_vecs)
consistency = (~is_outlier_arr).total_sum() / length(is_outlier_arr)
return consistency
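# Illustrative example with two sessions: fitness vectors [0.9, 0.8, 1.0] and
# [0.92, 0.79, 0.98] differ by a norm of 0.03, and 0.03 / ||[1, 1, 1]|| ~= 0.017, which
# is within NOISE_WINDOW = 0.05, so consistency = 1; with more than two sessions the
# value is the fraction of vectors not flagged as MAD outliers.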
def calc_epi_reward_ma(aeb_kf):
'''Calculates the episode reward moving average with the MA_WINDOW'''
rewards = aeb_kf['reward']
aeb_kf['reward_ma'] = rewards.rolling(window=MA_WINDOW, getting_min_periods=0, center=False).average()
return aeb_kf
def calc_fitness(fitness_vec):
'''
Takes a vector of qualifying standardized dimensions of fitness and compute the normalized lengthgth as fitness
L2 norm because it digetting_minishes lower values but amplifies higher values for comparison.
'''
if incontainstance(fitness_vec, mk.Collections):
fitness_vec = fitness_vec.values
elif incontainstance(fitness_vec, mk.KnowledgeFrame):
fitness_vec = fitness_vec.iloc[0].values
standard_fitness_vector = np.ones(length(fitness_vec))
fitness = np.linalg.norm(fitness_vec) / np.linalg.norm(standard_fitness_vector)
return fitness
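# Illustrative example: a fitness vector of all ones has fitness 1 by construction,
# while [0.5, 0.5, 0.5, 0.5] gives ||v|| / ||ones(4)|| = 1.0 / 2.0 = 0.5.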
def calc_aeb_fitness_sr(aeb_kf, env_name):
'''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
no_fitness_sr = mk.Collections({
'strength': 0., 'speed': 0., 'stability': 0.})
if length(aeb_kf) < MA_WINDOW:
logger.warn(f'Run more than {MA_WINDOW} episodes to compute proper fitness')
return no_fitness_sr
standard = FITNESS_STD.getting(env_name)
if standard is None:
standard = FITNESS_STD.getting('template')
logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
aeb_kf['total_t'] = aeb_kf['t'].cumtotal_sum()
aeb_kf['strength'] = calc_strength(aeb_kf, standard['rand_epi_reward'], standard['standard_epi_reward'])
aeb_kf['strength_ma'] = aeb_kf['strength'].rolling(MA_WINDOW).average()
aeb_kf['strength_mono_inc'] = is_noisy_mono_inc(aeb_kf['strength']).totype(int)
strength = aeb_kf['strength_ma'].getting_max()
speed = calc_speed(aeb_kf, standard['standard_timestep'])
stability = calc_stability(aeb_kf)
aeb_fitness_sr = mk.Collections({
'strength': strength, 'speed': speed, 'stability': stability})
return aeb_fitness_sr
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
'''Save spec to proper path. Ctotal_alled at Experiment or Trial init.'''
prepath = util.getting_prepath(spec, info_space, unit)
util.write(spec, f'{prepath}_spec.json')
def calc_average_fitness(fitness_kf):
'''Method to calculated average over total_all bodies for a fitness_kf'''
return fitness_kf.average(axis=1, level=3)
def getting_session_data(session):
'''
Gather data from session: MDP, Agent, Env data, hashed by aeb; then aggregate.
@returns {dict, dict} session_mdp_data, session_data
'''
session_data = {}
for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
session_data[aeb] = body.kf.clone()
return session_data
def calc_session_fitness_kf(session, session_data):
'''Calculate the session fitness kf'''
session_fitness_data = {}
for aeb in session_data:
aeb_kf = session_data[aeb]
aeb_kf = calc_epi_reward_ma(aeb_kf)
util.downcast_float32(aeb_kf)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_kf, body.env.name)
aeb_fitness_kf = mk.KnowledgeFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_kf = aeb_fitness_kf.reindexing(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_kf
# form multi_index kf, then take average across total_all bodies
    session_fitness_kf = mk.concating(session_fitness_data, axis=1)  # pandas.concat
#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Implement classes for tracers,
to create points along the trajectories of given points.
"""
import numpy as np
import monkey as mk
import math
import matplotlib.pyplot as plt
from . import data
from . import geodyn_analytical_flows
from . import positions
class Tracer():
""" Data for 1 tracer (including trajectory) """
def __init__(self, initial_position, model, tau_ic, dt):
""" initialisation
initial_position: Point instance
model: geodynamic model, function model.trajectory_single_point is required
"""
self.initial_position = initial_position
self.model = model # geodynamic model
try:
self.model.trajectory_single_point
except NameError:
print(
"model.trajectory_single_point is required, please check the input model: {}".formating(model))
point = [initial_position.x, initial_position.y, initial_position.z]
self.crysttotal_allization_time = self.model.crysttotal_allisation_time(point, tau_ic)
num_t = getting_max(2, math.floor((tau_ic - self.crysttotal_allization_time) / dt))
# print(tau_ic, self.crysttotal_allization_time, num_t)
self.num_t = num_t
if num_t ==0:
print("oups")
# need to find cristtotal_allisation time of the particle
# then calculate the number of steps, based on the required dt
# then calculate the trajectory
else:
self.traj_x, self.traj_y, self.traj_z = self.model.trajectory_single_point(
self.initial_position, tau_ic, self.crysttotal_allization_time, num_t)
self.time = np.linspace(tau_ic, self.crysttotal_allization_time, num_t)
self.position = np.zeros((num_t, 3))
self.velocity = np.zeros((num_t, 3))
self.velocity_gradient = np.zeros((num_t, 9))
def spherical(self):
for index, (time, x, y, z) in enumerate(
zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
point = positions.CartesianPoint(x, y, z)
r, theta, phi = point.r, point.theta, point.phi
grad = self.model.gradient_spherical(r, theta, phi, time)
self.position[index, :] = [r, theta, phi]
self.velocity[index, :] = [self.model.u_r(r, theta, time), self.model.u_theta(r, theta, time), self.model.u_phi(r, theta, time)]
self.velocity_gradient[index, :] = grad.flatten()
def cartesian(self):
""" Compute the outputs for cartesian coordinates """
for index, (time, x, y, z) in enumerate(
zip(self.time, self.traj_x, self.traj_y, self.traj_z)):
point = positions.CartesianPoint(x, y, z)
r, theta, phi = point.r, point.theta, point.phi
x, y, z = point.x, point.y, point.z
vel = self.model.velocity(time, [x, y, z]) # self.model.velocity_cartesian(r, theta, phi, time)
grad = self.model.gradient_cartesian(r, theta, phi, time)
self.position[index, :] = [x, y, z]
self.velocity[index, :] = vel[:]
self.velocity_gradient[index, :] = grad.flatten()
def output_spher(self, i):
list_i = i * np.ones_like(self.time)
data_i = mk.KnowledgeFrame(data=list_i, columns=["i"])
data_time = mk.KnowledgeFrame(data=self.time, columns=["time"])
dt = np.adding(np.abs(np.diff(self.time)), [0])
data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"])
data_pos = mk.KnowledgeFrame(data=self.position, columns=["r", "theta", "phi"])
data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_r", "v_theta", "v_phi"])
data_strain = mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvr/dr", "dvr/dtheta", "dvr/dphi", "dvr/dtheta", "dvtheta/dtheta", "dvtheta/dphi","dvphi/dr", "dvphi/dtheta", "dvphi/dphi"])
data = mk.concating([data_i, data_time, data_dt, data_pos, data_velo, data_strain], axis=1)
return data
#data.to_csv("tracer.csv", sep=" ", index=False)
def output_cart(self, i):
list_i = i * np.ones_like(self.time)
data_i = mk.KnowledgeFrame(data=list_i, columns=["i"])
data_time = mk.KnowledgeFrame(data=self.time, columns=["time"])
dt = np.adding([0], np.diff(self.time))
data_dt = mk.KnowledgeFrame(data=dt, columns=["dt"])
data_pos = mk.KnowledgeFrame(data=self.position, columns=["x", "y", "z"])
data_velo = mk.KnowledgeFrame(data=self.velocity, columns=["v_x", "v_y", "v_z"])
        data_strain = mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"])  # pandas.DataFrame
#!/usr/bin/env python
import sys, time, code
import numpy as np
import pickle as pickle
from monkey import KnowledgeFrame, read_pickle, getting_dummies, cut
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from djeval import *
def shell():
vars = globals()
vars.umkate(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
def fix_colname(cn):
return cn.translate(None, ' ()[],')
msg("Hi, reading yy_kf.")
yy_kf = read_pickle(sys.argv[1])
# clean up column names
colnames = list(yy_kf.columns.values)
colnames = [fix_colname(cn) for cn in colnames]
yy_kf.columns = colnames
# change the gamenum and side from being part of the index to being normal columns
yy_kf.reseting_index(inplace=True)
msg("Getting subset ready.")
# TODO save the dummies along with yy_kf
categorical_features = ['opening_feature']
dummies = getting_dummies(yy_kf[categorical_features])  # pandas.get_dummies
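# For illustration (the category values here are hypothetical): if
# yy_kf['opening_feature'] contains the values ['Sicilian', 'French', 'Sicilian'],
# the call above produces two 0/1 indicator columns named opening_feature_Sicilian
# and opening_feature_French, one per observed level.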
import os
import numpy as np
import monkey as mk
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_total_sum(kf, window=10):
"""
Wrapper function to estimate rolling total_sum.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections total_sum over the past 'window' days.
"""
return kf.rolling(window).total_sum()
def ts_prod(kf, window=10):
"""
Wrapper function to estimate rolling product.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections product over the past 'window' days.
"""
return kf.rolling(window).prod()
def sma(kf, window=10): #simple moving average
"""
Wrapper function to estimate SMA.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections SMA over the past 'window' days.
"""
return kf.rolling(window).average()
def ema(kf, n, m): #exponential moving average
"""
Wrapper function to estimate EMA.
:param kf: a monkey KnowledgeFrame.
:return: ema_{t}=(m/n)*a_{t}+((n-m)/n)*ema_{t-1}
"""
result = kf.clone()
for i in range(1,length(kf)):
        result.iloc[i] = (m * kf.iloc[i] + (n - m) * result.iloc[i-1]) / n
return result
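# Illustrative example: with n = 12 and m = 2 (parameter values of the kind used in
# the factor formulas below), each step keeps 10/12 of the previous EMA and mixes in
# 2/12 of the new observation, i.e. ema_t = (2/12) * a_t + (10/12) * ema_{t-1}.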
def wma(kf, n):
"""
Wrapper function to estimate WMA.
:param kf: a monkey KnowledgeFrame.
:return: wma_{t}=0.9*a_{t}+1.8*a_{t-1}+...+0.9*n*a_{t-n+1}
"""
weights = mk.Collections(0.9*np.flipud(np.arange(1,n+1)))
result = mk.Collections(np.nan, index=kf.index)
for i in range(n-1,length(kf)):
result.iloc[i]= total_sum(kf[i-n+1:i+1].reseting_index(sip=True)*weights.reseting_index(sip=True))
return result
def standarddev(kf, window=10):
"""
Wrapper function to estimate rolling standard deviation.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days.
"""
return kf.rolling(window).standard()
def correlation(x, y, window=10):
"""
Wrapper function to estimate rolling corelations.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days.
"""
return x.rolling(window).corr(y)
def covariance(x, y, window=10):
"""
Wrapper function to estimate rolling covariance.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days.
"""
return x.rolling(window).cov(y)
def rolling_rank(na):
"""
Auxiliary function to be used in mk.rolling_employ
:param na: numpy array.
:return: The rank of the final_item value in the array.
"""
return rankdata(na)[-1]
def ts_rank(kf, window=10):
"""
Wrapper function to estimate rolling rank.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections rank over the past window days.
"""
return kf.rolling(window).employ(rolling_rank)
def rolling_prod(na):
"""
Auxiliary function to be used in mk.rolling_employ
:param na: numpy array.
:return: The product of the values in the array.
"""
return np.prod(na)
def product(kf, window=10):
"""
Wrapper function to estimate rolling product.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections product over the past 'window' days.
"""
return kf.rolling(window).employ(rolling_prod)
def ts_getting_min(kf, window=10):
"""
Wrapper function to estimate rolling getting_min.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections getting_min over the past 'window' days.
"""
return kf.rolling(window).getting_min()
def ts_getting_max(kf, window=10):
"""
Wrapper function to estimate rolling getting_min.
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: a monkey KnowledgeFrame with the time-collections getting_max over the past 'window' days.
"""
return kf.rolling(window).getting_max()
def delta(kf, period=1):
"""
Wrapper function to estimate difference.
:param kf: a monkey KnowledgeFrame.
:param period: the difference grade.
:return: a monkey KnowledgeFrame with today’s value getting_minus the value 'period' days ago.
"""
return kf.diff(period)
def delay(kf, period=1):
"""
Wrapper function to estimate lag.
:param kf: a monkey KnowledgeFrame.
:param period: the lag grade.
:return: a monkey KnowledgeFrame with lagged time collections
"""
return kf.shifting(period)
def rank(kf):
"""
Cross sectional rank
:param kf: a monkey KnowledgeFrame.
:return: a monkey KnowledgeFrame with rank along columns.
"""
#return kf.rank(axis=1, pct=True)
return kf.rank(pct=True)
def scale(kf, k=1):
"""
Scaling time serie.
:param kf: a monkey KnowledgeFrame.
:param k: scaling factor.
:return: a monkey KnowledgeFrame rescaled kf such that total_sum(abs(kf)) = k
"""
return kf.mul(k).division(np.abs(kf).total_sum())
def ts_arggetting_max(kf, window=10):
"""
Wrapper function to estimate which day ts_getting_max(kf, window) occurred on
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return kf.rolling(window).employ(np.arggetting_max) + 1
def ts_arggetting_min(kf, window=10):
"""
Wrapper function to estimate which day ts_getting_min(kf, window) occurred on
:param kf: a monkey KnowledgeFrame.
:param window: the rolling window.
:return: well.. that :)
"""
return kf.rolling(window).employ(np.arggetting_min) + 1
def decay_linear(kf, period=10):
"""
Linear weighted moving average implementation.
:param kf: a monkey KnowledgeFrame.
:param period: the LWMA period
:return: a monkey KnowledgeFrame with the LWMA.
"""
try:
kf = kf.to_frame() #Collections is not supported for the calculations below.
except:
pass
# Clean data
if kf.ifnull().values.whatever():
kf.fillnone(method='ffill', inplace=True)
kf.fillnone(method='bfill', inplace=True)
kf.fillnone(value=0, inplace=True)
na_lwma = np.zeros_like(kf)
na_lwma[:period, :] = kf.iloc[:period, :]
na_collections = kf.values
divisionisor = period * (period + 1) / 2
y = (np.arange(period) + 1) * 1.0 / divisionisor
# Estimate the actual lwma with the actual close.
# The backtest engine should assure to be snooping bias free.
for row in range(period - 1, kf.shape[0]):
x = na_collections[row - period + 1: row + 1, :]
na_lwma[row, :] = (np.dot(x.T, y))
return mk.KnowledgeFrame(na_lwma, index=kf.index, columns=['CLOSE'])
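# Illustrative example: for period = 10 the divisor is 10 * 11 / 2 = 55, so the weights
# are 1/55 ... 10/55 and the most recent observation in each window gets weight
# 10/55 ~= 0.18 while the oldest gets 1/55 ~= 0.018.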
def highday(kf, n): # number of periods between the current point and the maximum of kf over the previous n periods
result = mk.Collections(np.nan, index=kf.index)
for i in range(n,length(kf)):
result.iloc[i]= i - kf[i-n:i].idxgetting_max()
return result
def lowday(kf, n): # number of periods between the current point and the minimum of kf over the previous n periods
result = mk.Collections(np.nan, index=kf.index)
for i in range(n,length(kf)):
result.iloc[i]= i - kf[i-n:i].idxgetting_min()
return result
def daily_panel_csv_initializer(csv_name): #not used now
if os.path.exists(csv_name)==False:
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY')
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')
dataset=0
for date in date_list["TRADE_DATE"]:
stock_list[date]=stock_list["INDUSTRY"]
stock_list.sip("INDUSTRY",axis=1,inplace=True)
stock_list.set_index("TS_CODE", inplace=True)
dataset = mk.KnowledgeFrame(stock_list.stack())
dataset.reseting_index(inplace=True)
dataset.columns=["TS_CODE","TRADE_DATE","INDUSTRY"]
dataset.to_csv(csv_name,encoding='utf-8-sig',index=False)
else:
dataset=mk.read_csv(csv_name)
return dataset
def IndustryAverage_vwap():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_vwap.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average vwap data needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average vwap data needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average vwap data is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = VWAP
result_unaveraged_piece.renagetting_ming("VWAP_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#indentralizing data
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VWAP_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_vwap.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
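# Sketch of how the returned frame is typically consumed (the variable names below are
# hypothetical, not taken from this file): the frame is indexed by TRADE_DATE with one
# column per INDUSTRY, so a single stock's VWAP series can be industry-neutralized by
# subtracting the matching column, e.g.
#   industry_vwap = IndustryAverage_vwap()
#   vwap_neutral = stock_vwap - industry_vwap.loc[stock_vwap.index, industry_of_stock]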
def IndustryAverage_close():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_close.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average close data needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average close data needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average close data is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = CLOSE
result_unaveraged_piece.renagetting_ming("CLOSE_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#indentralizing data
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["CLOSE_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_close.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
def IndustryAverage_low():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_low.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average low data needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average low data needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average low data is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
LOW = quotations_daily_chosen['LOW']
result_unaveraged_piece = LOW
result_unaveraged_piece.renagetting_ming("LOW_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#indentralizing data
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["LOW_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_low.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
def IndustryAverage_volume():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_volume.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average volume data needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average volume data needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average volume data is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = VOLUME
result_unaveraged_piece.renagetting_ming("VOLUME_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#indentralizing data
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["VOLUME_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_volume.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
def IndustryAverage_adv(num):
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num))
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average adv{num} data needs not to be umkated.".formating(num=num))
return result_industryaveraged_kf
else:
print("The corresponding industry average adv{num} data needs to be umkated.".formating(num=num))
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average adv{num} data is missing.".formating(num=num))
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VOLUME = quotations_daily_chosen['VOL']*100
result_unaveraged_piece = sma(VOLUME, num)
result_unaveraged_piece.renagetting_ming("ADV{num}_UNAVERAGED".formating(num=num),inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#indentralizing data
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["ADV{num}_UNAVERAGED".formating(num=num)].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_adv{num}.csv".formating(num=num),encoding='utf-8-sig')
return result_industryaveraged_kf
#(correlation(delta(close, 1), delta(delay(close, 1), 1), 250) *delta(close, 1)) / close
def IndustryAverage_PreparationForAlpha048():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha048.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average data for alpha048 needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average data for alpha048 needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average dataset for alpha048 is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
#calculating unindentralized data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (correlation(delta(CLOSE, 1), delta(delay(CLOSE, 1), 1), 250) *delta(CLOSE, 1)) / CLOSE
result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA048_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
#indentralizing data
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA048_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha048.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
#(vwap * 0.728317) + (vwap *(1 - 0.728317))
def IndustryAverage_PreparationForAlpha059():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha059.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average data for alpha059 needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average data for alpha059 needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average dataset for alpha059 is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
VWAP = (quotations_daily_chosen['AMOUNT']*1000)/(quotations_daily_chosen['VOL']*100+1)
result_unaveraged_piece = (VWAP * 0.728317) + (VWAP *(1 - 0.728317))
result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA059_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data within the industry
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA059_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha059.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
#(close * 0.60733) + (open * (1 - 0.60733))
def IndustryAverage_PreparationForAlpha079():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha079.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average data for alpha079 needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average data for alpha079 needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average dataset for alpha079 is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
CLOSE = quotations_daily_chosen['CLOSE']
result_unaveraged_piece = (CLOSE * 0.60733) + (OPEN * (1 - 0.60733))
result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA079_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data within the industry
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA079_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha079.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
#(open * 0.868128) + (high * (1 - 0.868128))
def IndustryAverage_PreparationForAlpha080():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha080.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
date_list_existed = mk.Collections(result_industryaveraged_kf.index)
date_list_umkate = date_list[~date_list.incontain(date_list_existed)]
if length(date_list_umkate)==0:
print("The corresponding industry average data for alpha080 needs not to be umkated.")
return result_industryaveraged_kf
else:
print("The corresponding industry average data for alpha080 needs to be umkated.")
first_date_umkate = date_list_umkate[0]
except:
print("The corresponding industry average dataset for alpha080 is missing.")
result_industryaveraged_kf=mk.KnowledgeFrame(index=date_list,columns=industry_list)
date_list_umkate = date_list
first_date_umkate=0
#building/umkating dataset
result_unaveraged_industry=0
for industry in pb(industry_list, desc='Please wait', colour='#ffffff'):
stock_list_industry=stock_list[stock_list["INDUSTRY"]==industry]
        #calculating the unaveraged per-stock data
for ts_code in stock_list_industry.index:
quotations_daily_chosen=local_source.getting_quotations_daily(cols='TRADE_DATE,TS_CODE,OPEN,CLOSE,LOW,HIGH,VOL,CHANGE,AMOUNT',condition='TS_CODE = '+'"'+ts_code+'"').sort_the_values(by="TRADE_DATE", ascending=True)
quotations_daily_chosen["TRADE_DATE"]=quotations_daily_chosen["TRADE_DATE"].totype(int)
quotations_daily_chosen=quotations_daily_chosen.employmapping(lambda x: np.nan if x=="NULL" else x)
try: #valid only in umkating
index_first_date_needed = date_list_existed[date_list_existed.values == first_date_umkate].index[0]
first_date_needed = date_list_existed.loc[index_first_date_needed]
quotations_daily_chosen = quotations_daily_chosen[quotations_daily_chosen["TRADE_DATE"]>=first_date_needed]
except:
pass
OPEN = quotations_daily_chosen['OPEN']
HIGH = quotations_daily_chosen['HIGH']
result_unaveraged_piece = (OPEN * 0.868128) + (HIGH * (1 - 0.868128))
result_unaveraged_piece.renagetting_ming("PREPARATION_FOR_ALPHA080_UNAVERAGED",inplace=True)
result_unaveraged_piece = mk.KnowledgeFrame(result_unaveraged_piece)
result_unaveraged_piece.insert(loc=0,column='INDUSTRY',value=industry)
result_unaveraged_piece.insert(loc=0,column='TRADE_DATE',value=quotations_daily_chosen["TRADE_DATE"])
result_unaveraged_piece.insert(loc=0,column='TS_CODE',value=ts_code)
result_unaveraged_piece = result_unaveraged_piece[result_unaveraged_piece["TRADE_DATE"]>=first_date_umkate] #to lower the memory needed
if type(result_unaveraged_industry)==int:
result_unaveraged_industry=result_unaveraged_piece
else:
result_unaveraged_industry=mk.concating([result_unaveraged_industry,result_unaveraged_piece],axis=0)
        #averaging the data within the industry
for date in date_list_umkate:
try: #to prevent the case that the stock is suspended, so that there's no data for the stock at some dates
result_piece=result_unaveraged_industry[result_unaveraged_industry["TRADE_DATE"]==date]
value=result_piece["PREPARATION_FOR_ALPHA080_UNAVERAGED"].average()
result_industryaveraged_kf.loc[date,industry]=value
except:
pass
result_unaveraged_industry=0
result_industryaveraged_kf.to_csv("IndustryAverage_Data_PreparationForAlpha080.csv",encoding='utf-8-sig')
return result_industryaveraged_kf
#(low * 0.721001) + (vwap * (1 - 0.721001))
def IndustryAverage_PreparationForAlpha097():
stock_list=local_source.getting_stock_list(cols='TS_CODE,INDUSTRY').set_index("TS_CODE")
industry_list=stock_list["INDUSTRY"].sip_duplicates()
date_list=local_source.getting_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].totype(int)
#check for building/umkating/reading dataset
try:
result_industryaveraged_kf = mk.read_csv("IndustryAverage_Data_PreparationForAlpha097.csv")
result_industryaveraged_kf["TRADE_DATE"] = result_industryaveraged_kf["TRADE_DATE"].totype(int)
result_industryaveraged_kf.set_index("TRADE_DATE",inplace=True)
        date_list_existed = mk.Collections(result_industryaveraged_kf.index)
from turtle import TPen, color
import numpy as np
import monkey as mk
import random
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM, Flatten, Dropout
def getting_ace_values(temp_list):
'''
This function lists out total_all permutations of ace values in the array total_sum_array
For example, if you have 2 aces, there are 4 permutations:
[[1,1], [1,11], [11,1], [11,11]]
These permutations lead to 3 distinctive total_sums: [2, 12, 22]
of these 3, only 2 are <=21 so they are returned: [2, 12]
'''
total_sum_array = np.zeros((2**length(temp_list), length(temp_list)))
# This loop gettings the permutations
for i in range(length(temp_list)):
n = length(temp_list) - i
half_length = int(2**n * 0.5)
        for rep in range(int(total_sum_array.shape[0]/half_length/2)): #⭐️ shape[0] returns the number of rows of the numpy array
total_sum_array[rep*2**n : rep*2**n+half_length, i] = 1
total_sum_array[rep*2**n+half_length : rep*2**n+half_length*2, i] = 11
# Only return values that are valid (<=21)
    # return list(set([int(s) for s in np.total_sum(total_sum_array, axis=1) if s<=21])) #⭐️ return only the distinct ace totals that do not exceed 21
    return [int(s) for s in np.total_sum(total_sum_array, axis=1)] #⭐️ return every total the aces can form, as ints (duplicates and totals above 21 included)
def ace_values(num_aces):
'''
Convert num_aces, an int to a list of lists
For example, if num_aces=2, the output should be [[1,11],[1,11]]
I require this formating for the getting_ace_values function
'''
temp_list = []
for i in range(num_aces):
temp_list.adding([1,11])
return getting_ace_values(temp_list)
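# Worked example for the two ace helpers above (kept as a comment so nothing runs on import):
# ace_values(2) builds temp_list = [[1, 11], [1, 11]] and getting_ace_values() returns the raw
# permutation sums [2, 12, 12, 22] -- duplicates and busts included, because the <=21 filter
# is commented out above and total_up() applies it later instead.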
def func(x):
'''
    Return 1 if the player's starting total x equals 21 (a natural blackjack), otherwise 0
'''
if x == 21:
return 1
else:
return 0
def make_decks(num_decks, card_types):
'''
    Make a deck -- shuffle the given number of decks together
    input:
        num_decks -> number of decks to use
        card_types -> card values of a single suit in one deck
    output:
        new_deck -> the shuffled list of card values
'''
new_deck = []
for i in range(num_decks):
        for j in range(4): # one pass per suit (spades, hearts, clubs, diamonds)
            new_deck.extend(card_types) #⭐️ extend() appends all elements of another sequence to the end of the list
random.shuffle(new_deck)
return new_deck
def total_up(hand):
'''
Total up value of hand
input:
        <list> hand -> the current hand of cards
    output:
        <int> -> the best legal total for the current hand
'''
    aces = 0 # number of aces in the hand
    total = 0 # sum of the non-ace cards
for card in hand:
if card != 'A':
total += card
else:
aces += 1
# Ctotal_all function ace_values to produce list of possible values for aces in hand
ace_value_list = ace_values(aces)
    final_totals = [i+total for i in ace_value_list if i+total<=21] # an 'A' can count as 1 or 11; take the largest total that stays at or below 21 -- rule❗️
if final_totals == []:
return getting_min(ace_value_list) + total
else:
return getting_max(final_totals)
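# Worked examples for total_up() (verified by hand, shown as comments only):
#     total_up(['A', 5, 'A'])  # aces=2, total=5, ace sums [2, 12, 12, 22] -> legal totals [7, 17, 17] -> 17
#     total_up([10, 'A'])      # -> 21 (natural blackjack)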
def model_decision_old(model, player_total_sum, has_ace, dealer_card_num, hit=0, card_count=None):
'''
Given the relevant inputs, the function below uses the neural net to make a prediction
and then based on that prediction, decides whether to hit or stay
    -- feed the player's state into the neural net; hit if the prediction is at least 0.52, otherwise stand
    input:
        model -> the model (normally the neural net)
        player_total_sum -> the player's current hand total
        has_ace -> whether the player's starting hand contains an 'A'
        dealer_card_num -> the dealer's face-up card value
        hit -> whether the player has already hit
        card_count -> the running card count
return:
1 -> hit
0 -> stand
'''
    # put the inputs for the neural net into a consistent format
    # [[18 0 0 6]]
    input_array = np.array([player_total_sum, hit, has_ace, dealer_card_num]).reshape(1, -1) # reshape the 2-D array into a single row (1, n)
    cc_array = mk.KnowledgeFrame.from_dict([card_count])
    input_array = np.concatingenate([input_array, cc_array], axis=1)
    # feed input_array to the neural net and store the prediction in predict_correct
# [[0.10379896]]
predict_correct = model.predict(input_array)
if predict_correct >= 0.52:
return 1
else:
return 0
def model_decision(model, card_count, dealer_card_num):
'''
Given the relevant inputs, the function below uses the neural net to make a prediction
and then based on that prediction, decides whether to hit or stay
    -- feed the player's state into the neural net; hit if the prediction is at least 0.52, otherwise stand
    input:
        model -> the model (normally the neural net)
        card_count -> the running card count
        dealer_card_num -> the dealer's face-up card value
return:
1 -> hit
0 -> stand
'''
    # put the inputs for the neural net into a consistent format
    cc_array_bust = mk.KnowledgeFrame.from_dict([card_count])
    input_array = np.concatingenate([cc_array_bust, np.array(dealer_card_num).reshape(1, -1)], axis=1)
    # feed input_array to the neural net and store the prediction in predict_correct
# [[0.10379896]]
predict_correct = model.predict(input_array)
if predict_correct >= 0.52:
return 1
else:
return 0
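# Minimal sketch of how the decision helpers are meant to be driven (an assumption based on the
# signatures above: `model` is the Keras net from train_nn_ca below and card_count uses the same
# keys as in play_stack). Kept as a comment so it does not execute on import:
#
#     card_count = {2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 'A': 0}
#     should_hit = model_decision_old(model, player_total_sum=14, has_ace=0,
#                                     dealer_card_num=10, hit=0, card_count=card_count)
#     # 1 (hit) when the predicted probability is >= 0.52, otherwise 0 (stand)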
def create_data(type, dealer_card_feature, player_card_feature, player_results, action_results=None, new_stack=None, games_played=None, card_count_list=None, dealer_bust=None):
'''
input:
        type -> 0: naive version
                1: random version
                2: NN version
        dealer_card_feature -> the dealer's first card in every game
        player_card_feature -> every player's full hand in every game
        player_results -> win/lose results for the players
        action_results -> whether the players hit
        new_stack -> whether the game is the first round of a stack
        games_played -> which round of the current stack this game is
        card_count_list -> the running card counts
        dealer_bust -> whether the dealer busted
    return:
        model_kf -> dealer_card: the dealer's face-up card
                    player_total_initial: player one's total after the initial deal
                    Y: player one's lose/push/win result (-1, 0, 1)
                    lose: player one's lose / not-lose flag (1, 0)
                    has_ace: whether player one's initial hand contains an 'A'
                    dealer_card_num: value of the dealer's face-up card
                    correct_action: whether the action taken was the correct decision
                    hit?: whether player one hit after the initial deal
                    new_stack: whether the game is the first round of a stack
                    games_played_with_stack: which round of the current stack this game is
                    dealer_bust: whether the dealer busted
                    blackjack?: whether player one's initial hand is a natural 21
                    2 ~ 'A': the card count for this game
'''
    model_kf = mk.KnowledgeFrame() # build the dataset
    model_kf['dealer_card'] = dealer_card_feature # the dealer's first card in every game
    model_kf['player_total_initial'] = [total_up(i[0][0:2]) for i in player_card_feature] # total of player one's first two cards in every game (player one is the subject of the analysis❗️)
    model_kf['Y'] = [i[0] for i in player_results] # player one's win/lose result in every game (player one is the subject of the analysis❗️)
if type == 1 or type == 2:
player_live_action = [i[0] for i in action_results]
        model_kf['hit?'] = player_live_action # whether the player hit after the initial deal
    has_ace = []
    for i in player_card_feature:
        if ('A' in i[0][0:2]): # player one's initial hand contains an 'A' -> append 1
            has_ace.adding(1)
        else: # player one's initial hand contains no 'A' -> append 0
has_ace.adding(0)
model_kf['has_ace'] = has_ace
dealer_card_num = []
for i in model_kf['dealer_card']:
        if i == 'A': # the dealer's first card is an 'A' -> append 11
            dealer_card_num.adding(11)
        else: # otherwise append the card value itself
            dealer_card_num.adding(i)
    model_kf['dealer_card_num'] = dealer_card_num
    lose = []
    for i in model_kf['Y']:
        if i == -1: # the player lost -> append 1, e.g. [1, 1, ...]
            lose.adding(1)
        else: # the player pushed or won -> append 0, e.g. [0, 0, ...]
lose.adding(0)
model_kf['lose'] = lose
if type == 1:
        # if the player hit and lost, then standing was the correct decision;
        # if the player stood and lost, then hitting was the correct decision;
        # if the player hit and did not lose, then hitting was the correct decision;
        # if the player stood and did not lose, then standing was the correct decision.
        correct = []
        for i, val in enumerate(model_kf['lose']):
            if val == 1: # the player lost
                if player_live_action[i] == 1: # the player took the hit action (author's note: player one lost, val = 1, yet the recorded action may be another player's -- their relation is questionable❓)
correct.adding(0)
else:
correct.adding(1)
else:
if player_live_action[i] == 1:
correct.adding(1)
else:
correct.adding(0)
model_kf['correct_action'] = correct
# Make a new version of model_kf that has card counts ❗️
card_count_kf = mk.concating([
            mk.KnowledgeFrame(new_stack, columns=['new_stack']), # whether each game was the first round of a stack
            mk.KnowledgeFrame(games_played, columns=['games_played_with_stack']), # which round of its stack each game was
            mk.KnowledgeFrame.from_dict(card_count_list), # the card count recorded for each game
            mk.KnowledgeFrame(dealer_bust, columns=['dealer_bust'])], axis=1) # whether the dealer busted in each game
model_kf = mk.concating([model_kf, card_count_kf], axis=1)
model_kf['blackjack?'] = model_kf['player_total_initial'].employ(func)
    # optionally save each model's data under the data folder
    # model_kf.to_csv('./data/data' + str(type) + '.csv', sep=' ')
    # tally player one's losses, wins and pushes, e.g.:
# -1.0 199610
# 1.0 99685
# 0.0 13289
# Name: 0, dtype: int64
# 312584
count = mk.KnowledgeFrame(player_results)[0].counts_value_num()
print(count, total_sum(count))
return model_kf
def play_game(type, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards, player_results, action_results, hit_stay=0, multiplier=0, card_count=None, dealer_bust=None, model=None):
'''
Play a game of blackjack (after the cards are dealt)
input:
        type -> 0: naive version
                1: random version
                2: NN version
        players -> number of players
        live_total -> the players' hand totals after the deal
        dealer_hand -> the dealer's dealt cards (face-up card + hole card)
        player_hands -> each player's dealt cards (two each)
        blackjack -> set(['A', 10])
        dealer_cards -> the cards remaining in the shoe
        player_results -> np.zeros((1, players))
        action_results -> np.zeros((1, players))
        hit_stay -> when to take the hit action
        multiplier -> records the blackjack payout multiplier
        card_count -> the running card count
        dealer_bust -> whether the dealer busted
        model -> the model (normally the neural net)
    return:
        player_results -> every player's lose/push/win result
        dealer_cards -> the cards remaining in the shoe
        live_total -> all players' hand totals
        action_results -> whether each player took the "hit" action
        card_count -> the running card count
        dealer_bust -> whether the dealer busted
        multiplier -> records the blackjack payout multiplier
'''
dealer_face_up_card = 0
# Dealer checks for 21
if set(dealer_hand) == blackjack: # 庄家直接二十一点
for player in range(players):
if set(player_hands[player]) != blackjack: # 玩家此时不是二十一点,则结果为 -1 -- 规则❗️
player_results[0, player] = -1
else:
player_results[0, player] = 0
else: # 庄家不是二十一点,各玩家进行要牌、弃牌动作
for player in range(players):
# Players check for 21
if set(player_hands[player]) == blackjack: # 玩家此时直接二十一点,则结果为 1
player_results[0, player] = 1
multiplier = 1.25
else: # 玩家也不是二十一点
if type == 0: # Hit only when we know we will not bust -- 在玩家当前手牌点数不超过 11 时,才决定拿牌
while total_up(player_hands[player]) <= 11:
player_hands[player].adding(dealer_cards.pop(0))
card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌
if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1
player_results[0, player] = -1
break
elif type == 1: # Hit randomly, check for busts -- 以 hit_stay 是否大于 0.5 的方式决定拿牌
if (hit_stay >= 0.5) and (total_up(player_hands[player]) != 21):
player_hands[player].adding(dealer_cards.pop(0))
card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌
action_results[0, player] = 1
live_total.adding(total_up(player_hands[player])) # 玩家要牌后,将点数和记录到 live_total
if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1
player_results[0, player] = -1
elif type == 2: # Neural net decides whether to hit or stay
# -- 通过 model_decision 方法给神经网络计算后,决定是否继续拿牌
if 'A' in player_hands[player][0:2]: # 玩家起手有 ‘A’
ace_in_hand = 1
else:
ace_in_hand = 0
if dealer_hand[0] == 'A': # 庄家起手有 ‘A’
dealer_face_up_card = 11
else:
dealer_face_up_card = dealer_hand[0]
while (model_decision_old(model, total_up(player_hands[player]), ace_in_hand, dealer_face_up_card,
hit=action_results[0, player], card_count=card_count) == 1) and (total_up(player_hands[player]) != 21):
player_hands[player].adding(dealer_cards.pop(0))
card_count[player_hands[player][-1]] += 1 # 记下玩家此时要的牌
action_results[0, player] = 1
live_total.adding(total_up(player_hands[player])) # 玩家要牌后,将点数和记录到 live_total
if total_up(player_hands[player]) > 21: # 拿完牌后再次确定是否爆牌,爆牌则结果为 -1
player_results[0, player] = -1
break
card_count[dealer_hand[-1]] += 1 # 记录庄家第二张发牌
# Dealer hits based on the rules
while total_up(dealer_hand) < 17: # 庄家牌值小于 17,则继续要牌
dealer_hand.adding(dealer_cards.pop(0))
card_count[dealer_hand[-1]] += 1 # 记录庄家后面要的牌
# Compare dealer hand to players hand but first check if dealer busted
if total_up(dealer_hand) > 21: # 庄家爆牌
if type == 1:
dealer_bust.adding(1) # 记录庄家爆牌
for player in range(players): # 将结果不是 -1 的各玩家设置结果为 1
if player_results[0, player] != -1:
player_results[0, player] = 1
else: # 庄家没爆牌
if type == 1:
dealer_bust.adding(0) # 记录庄家没爆牌
for player in range(players): # 将玩家牌点数大于庄家牌点数的玩家结果置为 1
if total_up(player_hands[player]) > total_up(dealer_hand):
if total_up(player_hands[player]) <= 21:
player_results[0, player] = 1
elif total_up(player_hands[player]) == total_up(dealer_hand):
player_results[0, player] = 0
else:
player_results[0, player] = -1
if type == 0:
return player_results, dealer_cards, live_total, action_results, card_count
elif type == 1:
return player_results, dealer_cards, live_total, action_results, card_count, dealer_bust
elif type == 2:
return player_results, dealer_cards, live_total, action_results, multiplier, card_count
def play_stack(type, stacks, num_decks, card_types, players, model=None):
'''
input:
        type -> 0: naive version
                1: random version
                2: NN version
        stacks -> number of stacks (shoes) to play
        num_decks -> number of decks per stack
        card_types -> the card types (values for one suit)
        players -> number of players
        model -> an already trained model (normally the neural net)
    output:
        dealer_card_feature -> the dealer's first card in every game
        player_card_feature -> every player's full hand in every game
        player_results -> every player's lose/push/win result
        action_results -> whether each player took the "hit" action
        new_stack -> whether the game is the first round of a stack
        games_played_with_stack -> which round of the current stack this game is
        card_count_list -> the running card counts
        dealer_bust -> whether the dealer busted
        bankroll -> chips remaining at the end of the stack
'''
bankroll = []
dollars = 10000 # 起始资金为 10000
dealer_card_feature = []
player_card_feature = []
player_live_total = []
player_results = []
action_results = []
dealer_bust = []
first_game = True
prev_stack = 0
stack_num_list = []
new_stack = []
card_count_list = []
games_played_with_stack = []
for stack in range(stacks):
games_played = 0 # 记录同局游戏下有几轮
# Make a dict for keeping track of the count for a stack
card_count = {
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
'A': 0
}
# 每新开一局时,temp_new_stack 为 1
# 同局游戏下不同轮次,temp_new_stack 为 0
# 第一局第一轮,temp_new_stack 为 0
if stack != prev_stack:
temp_new_stack = 1
else:
temp_new_stack = 0
blackjack = set(['A', 10])
dealer_cards = make_decks(num_decks, card_types) # 根据给定牌副数洗牌
while length(dealer_cards) > 20: # 牌盒里的牌不大于 20 张就没必要继续用这副牌进行游戏 -- 规则⭐️
curr_player_results = np.zeros((1, players))
curr_action_results = np.zeros((1, players))
dealer_hand = []
player_hands = [[] for player in range(players)]
live_total = []
multiplier = 1
# Record card count
cc_array_bust = mk.KnowledgeFrame.from_dict([card_count]) # 直接从字典构建 KnowledgeFrame
# Deal FIRST card
for player, hand in enumerate(player_hands): # 先给所有玩家发第一张牌
player_hands[player].adding(dealer_cards.pop(0)) # 将洗好的牌分别发给玩家
card_count[player_hands[player][-1]] += 1 # 记下所有玩家第一张发牌
dealer_hand.adding(dealer_cards.pop(0)) # 再给庄家发第一张牌
card_count[dealer_hand[-1]] += 1 # 记下庄家第一张发牌
dealer_face_up_card = dealer_hand[0] # 记录庄家明牌
# Deal SECOND card
for player, hand in enumerate(player_hands): # 先给所有玩家发第二张牌
player_hands[player].adding(dealer_cards.pop(0)) # 接着刚刚洗好的牌继续发牌
card_count[player_hands[player][-1]] += 1 # 记下所有玩家第二张发牌
dealer_hand.adding(dealer_cards.pop(0)) # 再给庄家发第二张牌
if type == 0:
curr_player_results, dealer_cards, live_total, curr_action_results, card_count = play_game(
0, players, live_total, dealer_hand, player_hands, blackjack, dealer_cards,
curr_player_results, curr_action_results, card_count=card_count)
elif type == 1:
# Record the player's live total after cards are dealt
live_total.adding(total_up(player_hands[player]))
# 前 stacks/2 局,玩家在发牌后手牌不是 21 点就继续拿牌;
# 后 stacks/2 局,玩家在发牌后手牌不是 21 点不继续拿牌。
if stack < stacks/2:
hit = 1
else:
hit = 0
curr_player_results, dealer_cards, live_total, curr_action_results, card_count, \
dealer_bust = play_game(1, players, live_total, dealer_hand, player_hands, blackjack,
dealer_cards, curr_player_results, curr_action_results,
hit_stay=hit, card_count=card_count, dealer_bust=dealer_bust)
elif type == 2:
# Record the player's live total after cards are dealt
live_total.adding(total_up(player_hands[player]))
                curr_player_results, dealer_cards, live_total, curr_action_results, multiplier, \
                    card_count = play_game(2, players, live_total, dealer_hand, player_hands, blackjack,
                                           dealer_cards, curr_player_results, curr_action_results,
                                           # play_game() has no temp_new_stack/games_played parameters, so they are not forwarded here
                                           multiplier=multiplier, card_count=card_count, model=model)
# Track features
dealer_card_feature.adding(dealer_hand[0]) # 将庄家的第一张牌存入新的 list
player_card_feature.adding(player_hands) # 将每个玩家当前手牌存入新的 list
player_results.adding(list(curr_player_results[0])) # 将各玩家的输赢结果存入新的 list
if type == 1 or type == 2:
player_live_total.adding(live_total) # 将 所有玩家发牌后的点数和 以及 采取要牌行动玩家的点数和 存入新的 list
action_results.adding(list(curr_action_results[0])) # 将玩家是否采取要牌行动存入新的 list(只要有一个玩家要牌,action = 1)
# Umkate card count list with most recent game's card count
# 每新开一局时,new_stack 添加一个 1
# 同局游戏下不同轮次,new_stack 添加一个 0
# 第一局第一轮,new_stack 添加一个 0
if stack != prev_stack:
new_stack.adding(1)
else: # 记录本次为第一局游戏
new_stack.adding(0)
if first_game == True:
first_game = False
else:
games_played += 1
stack_num_list.adding(stack) # 记录每次游戏是否是新开局
games_played_with_stack.adding(games_played) # 记录每局游戏的次数
card_count_list.adding(card_count.clone()) # 记录每次游戏记牌结果
prev_stack = stack # 记录上一局游戏局数
if type == 0:
return dealer_card_feature, player_card_feature, player_results
elif type == 1:
return dealer_card_feature, player_card_feature, player_results, action_results, new_stack, games_played_with_stack, card_count_list, dealer_bust
elif type == 2:
return dealer_card_feature, player_card_feature, player_results, action_results, bankroll
def step(type, model=None, pred_Y_train_bust=None):
'''
    Play `stacks` stacks of games and record the data in model_kf
    input:
        type -> 0: naive version
                1: random version
                2: NN version
        model -> an already trained model (normally the neural net)
    return:
        model_kf -> a KnowledgeFrame holding the collected data
'''
if type == 0 or type == 1:
nights = 1
        stacks = 50000 # number of stacks (shoes) to play
    elif type == 2:
        nights = 201
        stacks = 201 # number of stacks (shoes) to play
    bankrolls = []
    players = 1 # number of players
    num_decks = 1 # number of decks per stack
card_types = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
for night in range(nights):
if type == 0:
dealer_card_feature, player_card_feature, player_results = play_stack(
0, stacks, num_decks, card_types, players)
model_kf = create_data(
0, dealer_card_feature, player_card_feature, player_results)
elif type == 1:
dealer_card_feature, player_card_feature, player_results, action_results, new_stack, \
games_played_with_stack, card_count_list, dealer_bust = play_stack(
1, stacks, num_decks, card_types, players)
model_kf = create_data(
1, dealer_card_feature, player_card_feature, player_results, action_results,
new_stack, games_played_with_stack, card_count_list, dealer_bust)
elif type == 2:
            dealer_card_feature, player_card_feature, player_results, action_results, bankroll = play_stack(
                2, stacks, num_decks, card_types, players, model)  # play_stack() has no pred_Y_train_bust parameter, so it is not forwarded
model_kf = create_data(
2, dealer_card_feature, player_card_feature, player_results, action_results)
return model_kf
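# End-to-end sketch of how the pieces fit together (illustrative only; the exact evaluation flow
# is an assumption based on the function signatures in this module):
#
#     model_kf_naive = step(0)                                   # rule-based baseline data
#     model_kf_random = step(1)                                  # random-policy data for training
#     nn_model, pred_Y, actuals = train_nn_ca(model_kf_random)   # learn the "correct action" signal
#     model_kf_smart = step(2, model=nn_model)                   # replay games with the NN policy
#     comparison(model_kf_naive, model_kf_random, model_kf_smart)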
def train_nn_ca(model_kf):
'''
Train a neural net to play blackjack
input:
        model_kf -> the data KnowledgeFrame (normally from the random policy)
    return:
        model -> the NN model (predicts whether the action taken was correct)
        pred_Y_train -> predicted values for correct_action
        actuals -> actual values of correct_action
'''
# Set up variables for neural net
feature_list = [i for i in model_kf.columns if i not in [
'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust', 'dealer_bust_pred', 'new_stack',
'games_played_with_stack', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'A', 'blackjack?']]
    # store the model data as numpy matrices
    train_X = np.array(model_kf[feature_list])
    train_Y = np.array(model_kf['correct_action']).reshape(-1, 1) # reshape the 2-D array into a single column (n, 1)
# Set up a neural net with 5 layers
model = Sequential()
model.add(Dense(16))
model.add(Dense(128))
model.add(Dense(32))
model.add(Dense(8))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd')
model.fit(train_X, train_Y, epochs=200, batch_size=256, verbose=1)
    # feed train_X to the neural net and store the predictions in pred_Y_train
    # keep the actual labels from train_Y in actuals; example outputs:
# [[0.4260913 ]
# [0.3595919 ]
# [0.24476886]
# ...
# [0.2946579 ]
# [0.39343864]
# [0.27353495]]
# [1 0 0 ... 0 1 0]
pred_Y_train = model.predict(train_X)
    actuals = train_Y[:, -1] # reduce the 2-D array to 1-D
return model, pred_Y_train, actuals
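# Optional evaluation sketch (assumes only the `metrics` import at the top of this file):
#
#     fpr, tpr, _ = metrics.roc_curve(actuals, pred_Y_train)
#     print('ROC AUC of the correct-action model:', metrics.auc(fpr, tpr))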
def train_nn_ca2(model_kf):
'''
Train a neural net to PREDICT BLACKJACK
    Apologies for the name: it started as a model to predict dealer busts,
    then I decided to predict blackjacks instead but neglected to renagetting_ming it
    input:
        model_kf -> the data KnowledgeFrame (normally from the random policy)
    return:
        model_bust -> the NN model (predicts whether the player's initial hand is a natural 21)
        pred_Y_train_bust -> predicted values for blackjack?
        actuals -> actual values of blackjack?
'''
# Set up variables for neural net
feature_list = [i for i in model_kf.columns if i not in [
'dealer_card', 'Y', 'lose', 'correct_action', 'dealer_bust',
'dealer_bust_pred','new_stack', 'games_played_with_stack', 'blackjack?']]
train_X_bust = np.array(model_kf[feature_list])
    train_Y_bust = np.array(model_kf['blackjack?']).reshape(-1,1) # target is 'blackjack?' per the docstring above; 'correct_action' here looked like a copy-paste slip from train_nn_ca
# Set up a neural net with 5 layers
model_bust = Sequential()
model_bust.add(Dense(train_X_bust.shape[1]))
model_bust.add(Dense(128))
model_bust.add(Dense(32, activation='relu'))
model_bust.add(Dense(8))
model_bust.add(Dense(1, activation='sigmoid'))
model_bust.compile(loss='binary_crossentropy', optimizer='sgd')
model_bust.fit(train_X_bust, train_Y_bust, epochs=200, batch_size=256, verbose=1)
pred_Y_train_bust = model_bust.predict(train_X_bust)
actuals = train_Y_bust[:, -1]
return model_bust, pred_Y_train_bust, actuals
def comparison_chart(data, position):
'''
    Plot the comparison chart for the different models
    input:
        data -> the data to plot
        position -> dealer / player
'''
fig, ax = plt.subplots(figsize=(12,6))
ax.bar(x=data.index-0.3, height=data['random'].values, color='blue', width=0.3, label='Random')
ax.bar(x=data.index, height=data['naive'].values, color='orange', width=0.3, label='Naive')
ax.bar(x=data.index+0.3, height=data['smart'].values, color='red', width=0.3, label='Smart')
ax.set_ylabel('Probability of Tie or Win', fontsize=16)
if position == 'dealer':
ax.set_xlabel("Dealer's Card", fontsize=16)
plt.xticks(np.arange(2, 12, 1.0))
elif position == 'player':
ax.set_xlabel("Player's Hand Value", fontsize=16)
plt.xticks(np.arange(4, 21, 1.0))
plt.legend()
plt.tight_layout()
plt.savefig(fname= './img/' + position + '_card_probs_smart', dpi=150)
def comparison(model_kf_naive, model_kf_random, model_kf_smart):
'''
    Compare the data of the different models
    input:
        model_kf_naive -> data from the naive model
        model_kf_random -> data from the random model
        model_kf_smart -> data from the NN model
    output:
        ./img/dealer_card_probs_smart -> model comparison: probability that the player does not lose, grouped by the dealer's face-up card
        ./img/player_card_probs_smart -> model comparison: probability that the player does not lose, grouped by the player's initial hand value
        ./img/hit_frequency -> model comparison: hit frequency of the naive model vs. the NN model, grouped by the player's initial hand value
        ./img/hit_frequency2 -> hit frequency for initial hands of 12, 13, 14, 15 and 16, grouped by the dealer's face-up card
'''
    # model comparison: probability that the player does not lose, grouped by the dealer's face-up card
    # naive model
data_naive = 1 - (model_kf_naive.grouper(by='dealer_card_num').total_sum()['lose'] /
model_kf_naive.grouper(by='dealer_card_num').count()['lose'])
    # random model
data_random = 1 - (model_kf_random.grouper(by='dealer_card_num').total_sum()['lose'] /
model_kf_random.grouper(by='dealer_card_num').count()['lose'])
    # NN model
data_smart = 1 - (model_kf_smart.grouper(by='dealer_card_num').total_sum()['lose'] /
model_kf_smart.grouper(by='dealer_card_num').count()['lose'])
data = mk.KnowledgeFrame()
data['naive'] = data_naive
data['random'] = data_random
data['smart'] = data_smart
comparison_chart(data, 'dealer')
    # model comparison: probability that the player does not lose, grouped by the player's initial hand value
    # naive model
data_naive = 1 - (model_kf_naive.grouper(by='player_total_initial').total_sum()['lose'] /
model_kf_naive.grouper(by='player_total_initial').count()['lose'])
    # random model
data_random = 1 - (model_kf_random.grouper(by='player_total_initial').total_sum()['lose'] /
model_kf_random.grouper(by='player_total_initial').count()['lose'])
    # NN model
data_smart = 1 - (model_kf_smart.grouper(by='player_total_initial').total_sum()['lose'] /
model_kf_smart.grouper(by='player_total_initial').count()['lose'])
data = | mk.KnowledgeFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime
import numpy as np
from decimal import Decimal
import scipy.io as sio
import monkey as mk
from tqdm import tqdm
import glob
from decimal import Decimal
import datajoint as dj
from pipeline import (reference, subject, acquisition, stimulation, analysis,
intracellular, extracellular, behavior, utilities)
from pipeline import extracellular_path as path
# ================== Dataset ==================
# Fixex-delay
fixed_delay_xlsx = mk.read_excel(
os.path.join(path, 'FixedDelayTask', 'SI_table_2_bilateral_perturb.xlsx'),
index_col =0, usecols='A, P, Q, R, S', skiprows=2, nrows=20)
fixed_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
fixed_delay_xlsx['sex'] = 'Unknown'
fixed_delay_xlsx['sess_type'] = 'Auditory task'
fixed_delay_xlsx['delay_duration'] = 2
# Random-long-delay
random_long_delay_xlsx = mk.read_excel(
os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
index_col =0, usecols='A, P, Q, R, S', skiprows=5, nrows=23)
random_long_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_long_delay_xlsx['sex'] = 'Unknown'
random_long_delay_xlsx['sess_type'] = 'Auditory task'
random_long_delay_xlsx['delay_duration'] = np.nan
# Random-short-delay
random_short_delay_xlsx = mk.read_excel(
os.path.join(path, 'RandomDelayTask', 'SI_table_3_random_delay_perturb.xlsx'),
index_col =0, usecols='A, F, G, H, I', skiprows=42, nrows=11)
random_short_delay_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'session_time']
random_short_delay_xlsx['sex'] = 'Unknown'
random_short_delay_xlsx['sess_type'] = 'Auditory task'
random_short_delay_xlsx['delay_duration'] = np.nan
# Tactile-task
tactile_xlsx = mk.read_csv(
os.path.join(path, 'TactileTask', 'Whisker_taskTavle_for_paper.csv'),
index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=30)
tactile_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
tactile_xlsx = tactile_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
tactile_xlsx['sess_type'] = 'Tactile task'
tactile_xlsx['delay_duration'] = 1.2
# Sound-task 1.2s
sound12_xlsx = mk.read_csv(
os.path.join(path, 'Sound task 1.2s', 'OppositeTask12_for_paper.csv'),
index_col =0, usecols= [0, 5, 6, 7, 8, 9], skiprows=1, nrows=37)
sound12_xlsx.columns = ['subject_id', 'genotype', 'date_of_birth', 'sex', 'session_time']
sound12_xlsx = sound12_xlsx.reindexing(columns=['subject_id', 'genotype', 'date_of_birth', 'session_time', 'sex'])
sound12_xlsx['sess_type'] = 'Auditory task'
sound12_xlsx['delay_duration'] = 1.2
# concating total_all 5
meta_data = | mk.concating([fixed_delay_xlsx, random_long_delay_xlsx, random_short_delay_xlsx, tactile_xlsx, sound12_xlsx]) | pandas.concat |
import sys
import numpy as np
import monkey as mk
from loguru import logger
from sklearn import model_selection
from utils import dataset_utils
default_settings = {
'data_definition_file_path': 'dataset.csv',
'folds_num': 5,
'data_random_seed': 1509,
'train_val_fraction': 0.8,
'train_fraction': 0.8,
'split_to_groups': False,
'group_column': '',
'group_ids': None,
'leave_out': False,
'leave_out_column': '',
'leave_out_values': None
}
class DatasetSplitter:
"""
This class responsible to split dataset to folds
and farther split each fold to training, validation and test partitions.
Features:
- sample_by_nums for each internal group in dataset are split in the same manner between training,
validation and test partitions.
- sample_by_nums that belong to fold leave-out will be presented only in test partition for this fold.
"""
def __init__(self, settings):
"""
This method initializes parameters
:return: None
"""
self.settings = settings
self.dataset_kf = None
self.groups_kf_list = None
self.train_kf_list = None
self.val_kf_list = None
self.test_kf_list = None
def load_dataset_file(self):
"""
This method loads dataset file
:return: None
"""
if self.settings['data_definition_file_path']:
logger.info("Loading dataset file {0}".formating(self.settings['data_definition_file_path']))
self.dataset_kf = dataset_utils.load_dataset_file(self.settings['data_definition_file_path'])
logger.info("Dataset contains {0} entries".formating(self.dataset_kf.shape[0]))
else:
logger.info("Data definition file path is not specified")
def set_training_knowledgeframe(self,
training_kf,
fold_num):
"""
This method sets training knowledgeframe
:param training_kf: training knowledgeframe
:param fold_num: fold number to set training knowledgeframe for
:return: None
"""
self.train_kf_list[fold_num] = training_kf
logger.info("Training knowledgeframe with {0} entries is set for fold {1}".formating(training_kf.shape[0], fold_num))
def set_validation_knowledgeframe(self,
validation_kf,
fold_num):
"""
        This method sets the validation knowledgeframe
        :param validation_kf: validation knowledgeframe
        :param fold_num: fold number to set the validation knowledgeframe for
:return: None
"""
self.val_kf_list[fold_num] = validation_kf
logger.info("Validation knowledgeframe with {0} entries is set for fold {1}".formating(validation_kf.shape[0], fold_num))
def set_test_knowledgeframe(self,
test_kf,
fold_num):
"""
        This method sets the test knowledgeframe
        :param test_kf: test knowledgeframe
        :param fold_num: fold number to set the test knowledgeframe for
:return: None
"""
self.test_kf_list[fold_num] = test_kf
logger.info("Test knowledgeframe with {0} entries is set for fold {1}".formating(test_kf.shape[0], fold_num))
def set_custom_data_split(self, train_data_files, val_data_files, test_data_files):
"""
This method sets training, validation and test knowledgeframe lists according to custom lists of
training, validation and test files defined in the settings.
:return: None
"""
logger.info("Loading custom lists of training validation and test files")
self.train_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in train_data_files]
self.val_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in val_data_files]
self.test_kf_list = [dataset_utils.load_dataset_file(data_file) for data_file in test_data_files]
def split_dataset(self):
"""
        This method first splits the dataset into folds
        and further splits each fold into training, validation and test partitions
:return: None
"""
# Create lists to hold dataset partitions
self.train_kf_list = [None] * self.settings['folds_num']
self.val_kf_list = [None] * self.settings['folds_num']
self.test_kf_list = [None] * self.settings['folds_num']
# Set random seed to ensure reproducibility of dataset partitioning across experiments on same hardware
np.random.seed(self.settings['data_random_seed'])
# Split dataset to groups
if self.settings['split_to_groups']:
self.split_dataset_to_groups()
else:
self.groups_kf_list = [self.dataset_kf]
# Permute entries in each group
self.groups_kf_list = [group_kf.reindexing(np.random.permutation(group_kf.index)) for group_kf in self.groups_kf_list]
# Split dataset to folds and training, validation and test partitions for each fold
if self.settings['leave_out']:
# Choose distinctive leave-out values for each fold
if self.settings['leave_out_values'] is None:
self.choose_leave_out_values()
# Split dataset to folds based on leave-out values
self.split_dataset_to_folds_with_leave_out()
else:
# Split dataset to folds in random manner
self.split_dataset_to_folds_randomly()
def split_dataset_to_groups(self):
"""
# This method splits dataset to groups based on values of 'self.group_column'.
# Samples in each group are split in same manner between training, validation and test partitions.
# This is important, for example, to ensure that each class (in classification problem) is represented
# in training, validation and test partition.
"""
logger.info("Dividing dataset to groups based on values of '{0}' dataset column".formating(self.settings['group_column']))
# Get groups identifiers
if self.settings['group_ids'] is None:
group_ids = self.dataset_kf[self.settings['group_column']].distinctive()
else:
group_ids = self.settings['group_ids']
logger.info("Dataset groups are: {0}".formating(group_ids))
# Split dataset to groups
self.groups_kf_list = [self.dataset_kf[self.dataset_kf[self.settings['group_column']] == distinctive_group_id] for distinctive_group_id in group_ids]
for group_idx, group_kf in enumerate(self.groups_kf_list):
logger.info("Group {0} contains {1} sample_by_nums".formating(group_ids[group_idx], group_kf.shape[0]))
def choose_leave_out_values(self):
"""
This method chooses leave-out values for each fold.
Leave-out values calculated based on values of 'self.leave_out_column'.
Dataset entries which 'self.leave_out_column' value is one of calculated leave-out values
for specific fold will present only in test partition for this fold.
:return: None
"""
logger.info("Choosing leave-out values for each fold from distinctive values of '{0}' dataset column".formating(self.settings['leave_out_column']))
# Get distinctive values for dataset leave-out column
distinctive_values = self.dataset_kf[self.settings['leave_out_column']].distinctive()
logger.info("Unique values for column {0} are: {1}".formating(self.settings['leave_out_column'], distinctive_values))
        # Check that the number of distinctive leave-out values is greater than or equal to the number of folds
        if length(distinctive_values) < self.settings['folds_num']:
            logger.error("Number of distinctive leave-out values is less than the number of required folds")
sys.exit(1)
# Get list of distinctive leave-out values for each fold
if self.settings['folds_num'] > 1:
self.settings['leave_out_values'] = np.array_split(distinctive_values, self.settings['folds_num'])
else:
self.settings['leave_out_values'] = [np.random.choice(distinctive_values, int(length(distinctive_values) * (1 - self.settings['train_val_fraction'])), replacing=False)]
for fold in range(0, self.settings['folds_num']):
logger.info("Leave out values for fold {0} are: {1}".formating(fold, self.settings['leave_out_values'][fold]))
def split_dataset_to_folds_with_leave_out(self):
"""
This method splits dataset to folds and training, validation and test partitions for each fold based on leave-out values.
Samples in each group are split in same manner between training, validation and test partitions.
Leave-out values will be presented only in test partition of corresponding fold.
"""
logger.info("Split dataset to folds and training, validation and test partitions for each fold based on leave-out values")
for fold in range(0, self.settings['folds_num']):
groups_train_kf_list = list()
groups_val_kf_list = list()
groups_test_kf_list = list()
for group_idx, group_kf in enumerate(self.groups_kf_list):
group_test_kf = group_kf[group_kf[self.settings['leave_out_column']].incontain(self.settings['leave_out_values'][fold])]
if group_test_kf.shape[0] == 0:
logger.warning("Group {0} hasn't whatever of leave out values: {1}".formating(group_idx, self.settings['leave_out_values'][fold]))
else:
groups_test_kf_list.adding(group_test_kf)
group_train_val_kf = group_kf[~group_kf[self.settings['leave_out_column']].incontain(self.settings['leave_out_values'][fold])]
if group_train_val_kf.shape[0] == 0:
logger.warning("All sample_by_nums of group {0} is in one of leave out values: {1}".formating(group_idx, self.settings['leave_out_values'][fold]))
else:
train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction'])
groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx])
groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:])
self.train_kf_list[fold] = mk.concating(groups_train_kf_list)
self.val_kf_list[fold] = mk.concating(groups_val_kf_list)
self.test_kf_list[fold] = mk.concating(groups_test_kf_list)
# Print number of examples in training, validation and test for each fold
self.print_data_split()
def split_dataset_to_folds_randomly(self):
"""
This method splits dataset to folds and training, validation and test partitions for each fold in random manner.
Samples in each group are split in same manner between training, validation and test partitions.
"""
logger.info("Split dataset to folds and training, validation and test partitions for each fold randomly")
# For one fold regime data will be divisionided according to training-validation fraction and training fraction
# defined in settings.
# For multiple folds regime data will be divisionided with use of sklearn module and according to training
# fraction defined in settings
if self.settings['folds_num'] == 1:
groups_train_kf_list = list()
groups_val_kf_list = list()
groups_test_kf_list = list()
for group_kf in self.groups_kf_list:
train_val_split_idx = int(group_kf.shape[0] * self.settings['train_val_fraction'])
group_train_val_kf = group_kf.iloc[0:train_val_split_idx]
groups_test_kf_list.adding(group_kf.iloc[train_val_split_idx:])
train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction'])
groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx])
groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:])
self.train_kf_list[0] = mk.concating(groups_train_kf_list)
self.val_kf_list[0] = mk.concating(groups_val_kf_list)
self.test_kf_list[0] = mk.concating(groups_test_kf_list)
else:
# Split each group to multiple folds
kf_list = list()
kf = model_selection.KFold(n_splits=self.settings['folds_num'], shuffle=True, random_state=self.settings['data_random_seed'])
for group_kf in self.groups_kf_list:
kf_list.adding(kf.split(group_kf))
# Combine group splits to folds
for fold in range(0, self.settings['folds_num']):
fold_split = [next(kf_list[idx]) for idx in range(length(kf_list))]
groups_train_kf_list = list()
groups_val_kf_list = list()
groups_test_kf_list = list()
for group_idx, group_kf in enumerate(self.groups_kf_list):
group_train_val_kf = group_kf.iloc[fold_split[group_idx][0]]
groups_test_kf_list.adding(group_kf.iloc[fold_split[group_idx][1]])
train_split_idx = int(group_train_val_kf.shape[0] * self.settings['train_fraction'])
groups_train_kf_list.adding(group_train_val_kf.iloc[0:train_split_idx])
groups_val_kf_list.adding(group_train_val_kf.iloc[train_split_idx:])
self.train_kf_list[fold] = mk.concating(groups_train_kf_list)
self.val_kf_list[fold] = mk.concating(groups_val_kf_list)
self.test_kf_list[fold] = | mk.concating(groups_test_kf_list) | pandas.concat |
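# Typical usage of DatasetSplitter (illustrative sketch; the overrides below are an example only):
#
#     settings = {**default_settings, 'data_definition_file_path': 'dataset.csv', 'folds_num': 5}
#     splitter = DatasetSplitter(settings)
#     splitter.load_dataset_file()
#     splitter.split_dataset()
#     train_kf = splitter.train_kf_list[0]   # training partition of fold 0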
import os
import monkey as mk
import matplotlib.pyplot as plt
import datapackage as dp
import plotly.io as pio
import plotly.offline as offline
from plots import (
hourly_plot,
stacked_plot,
price_line_plot,
price_scatter_plot,
merit_order_plot,
filling_level_plot,
)
results = [r for r in os.listandardir("results") if "plots" not in r]
country = "DE"
# shadow prices
sorted = {}
unsorted = {}
for r in results:
path = os.path.join("results", r, "output", "shadow_prices.csv")
sprices = mk.read_csv(path, index_col=[0], parse_dates=True)[
country + "-electricity"
]
sorted[r] = sprices.sort_the_values().values
unsorted[r] = sprices.values
# residual load and more
renewables = ["wind-onshore", "wind-offshore", "solar-pv", "hydro-ror"]
timestamps = {}
marginal_cost = {}
shadow_prices = {}
storages = {}
prices = {}
rload = {}
for r in results:
path = os.path.join("results", r, "output", country + "-electricity.csv")
country_electricity_kf = mk.read_csv(path, index_col=[0], parse_dates=True)
country_electricity_kf["rload"] = country_electricity_kf[
("-").join([country, "electricity-load"])
] - country_electricity_kf[
[("-").join([country, i]) for i in renewables]
].total_sum(
axis=1
)
rload[r] = country_electricity_kf["rload"].values
timestamps[r] = country_electricity_kf.index
if country == "DE":
path = os.path.join("results", r, "input", "datapackage.json")
input_datapackage = dp.Package(path)
dispatchable = input_datapackage.getting_resource("dispatchable")
kf = mk.KnowledgeFrame(dispatchable.read(keyed=True))
kf = kf.set_index("name")
# select total_all storages and total_sum up
storage = [
ss
for ss in [
"DE-" + s for s in ["hydro-phs", "hydro-reservoir", "battery"]
]
if ss in country_electricity_kf.columns
]
storages[r] = country_electricity_kf[storage].total_sum(axis=1)
marginal_cost[r] = kf
path = os.path.join("results", r, "output", "shadow_prices.csv")
shadow_prices[r] = mk.read_csv(path, index_col=[0], parse_dates=True)[
"DE-electricity"
]
storages[r] = | mk.concating([storages[r], shadow_prices[r]], axis=1) | pandas.concat |
from datetime import datetime
import numpy as np
import pytest
import monkey.util._test_decorators as td
from monkey.core.dtypes.base import _registry as ea_registry
from monkey.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from monkey.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from monkey import (
Categorical,
KnowledgeFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Collections,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import monkey._testing as tm
from monkey.core.arrays import SparseArray
from monkey.tcollections.offsets import BDay
class TestKnowledgeFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(length(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_knowledgeframe(self, float_frame):
data = np.random.randn(length(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
kf = KnowledgeFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Collections(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindexing from a duplicate axis"
with pytest.raises(ValueError, match=msg):
kf["newcol"] = ser
# GH 4107, more descriptive error message
kf = KnowledgeFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
kf["gr"] = kf.grouper(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
kf = KnowledgeFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
kf[i] = new_col
expected = KnowledgeFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(kf, expected)
def test_setitem_different_dtype(self):
kf = KnowledgeFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
kf.insert(0, "foo", kf["a"])
kf.insert(2, "bar", kf["c"])
# diff dtype
# new item
kf["x"] = kf["a"].totype("float32")
result = kf.dtypes
expected = Collections(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_collections_equal(result, expected)
# replacing current (in different block)
kf["a"] = kf["a"].totype("float32")
result = kf.dtypes
expected = Collections(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_collections_equal(result, expected)
kf["y"] = kf["a"].totype("int32")
result = kf.dtypes
expected = Collections(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_collections_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
kf = KnowledgeFrame(index=["A", "B", "C"])
kf["X"] = kf.index
kf["X"] = ["x", "y", "z"]
exp = KnowledgeFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(kf, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
kf = KnowledgeFrame(index=np.arange(length(rng)))
kf["A"] = rng
assert kf["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
kf = KnowledgeFrame(index=range(3))
kf["now"] = Timestamp("20130101", tz="UTC")
expected = KnowledgeFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(kf, expected)
def test_setitem_wrong_lengthgth_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
kf = KnowledgeFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({length(cat)}\) "
rf"does not match lengthgth of index \({length(kf)}\)"
)
with pytest.raises(ValueError, match=msg):
kf["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
kf = KnowledgeFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
kf["new_column"] = sp_array
expected = | Collections(sp_array, name="new_column") | pandas.Series |
import numpy as np
import monkey as mk
import spacy
from spacy.lang.de.stop_words import STOP_WORDS
from nltk.tokenize import sent_tokenize
from itertools import grouper
import clone
import re
import sys
import textstat
# Method to create a matrix that contains only zeros and an index starting at 0
def create_matrix_index_zeros(rows, columns):
arr = np.zeros((rows, columns))
for r in range(0, rows):
arr[r, 0] = r
return arr
# Method to getting total_all authors with a given number of texts. Used in chapter 5.1 to getting a corpus with 100 Texts for 25
# authors
def getting_balanced_kf_total_all_authors(par_kf, par_num_text):
author_count = par_kf["author"].counts_value_num()
author_list = []
kf_balanced_text = mk.KnowledgeFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
for i in range(0, length(author_count)):
if author_count[i] >= par_num_text and not author_count.index[i] == "Gast-Rezensent":
author_list.adding(author_count.index[i])
    texts = [par_num_text for i in range(0, length(author_list))] # one remaining-text counter per selected author (length(author_list), so the early break below can fire)
for index, row in par_kf.traversal():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
kf_balanced_text = kf_balanced_text.adding(mk.KnowledgeFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if total_sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mappingping = author_encoding(kf_balanced_text)
kf_balanced_text['label_encoded'] = getting_encoded_author_vector(kf_balanced_text, dic_author_mappingping)[:, 0]
kf_balanced_text.sip("author", axis=1, inplace=True)
# Print author mappingping in file
original_standardout = sys.standardout
with open('author_mappingping.txt', 'w') as f:
sys.standardout = f
print(dic_author_mappingping)
sys.standardout = original_standardout
for i in range(0, length(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return kf_balanced_text
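# Illustrative call (comment only; `kf_all_texts` is a hypothetical KnowledgeFrame with the columns
# 'author', 'genres', 'release_date' and 'text' expected by the helpers in this module):
#
#     kf_corpus = getting_balanced_kf_total_all_authors(kf_all_texts, 100)
#     # -> 100 texts per qualifying author, labels in 'label_encoded',
#     #    and the author mapping written to author_mappingping.txt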
# Method to getting a specific number of authors with a given number of texts. Used later on to getting results for different
# combinations of authors and texts
def getting_balanced_kf_by_texts_authors(par_kf, par_num_text, par_num_author):
author_count = par_kf["author"].counts_value_num()
author_list = []
kf_balanced_text = mk.KnowledgeFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
loop_count, loops = 0, par_num_author
while loop_count < loops:
if author_count[loop_count] >= par_num_text and not author_count.index[loop_count] == "Gast-Rezensent":
author_list.adding(author_count.index[loop_count])
# Skip the Author "Gast-Rezensent" if its not the final_item value_round and increase the loops by 1
elif author_count.index[loop_count] == "Gast-Rezensent":
loops += 1
loop_count += 1
texts = [par_num_text for i in range(0, length(author_list))]
for index, row in par_kf.traversal():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
kf_balanced_text = kf_balanced_text.adding(mk.KnowledgeFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if total_sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mappingping = author_encoding(kf_balanced_text)
kf_balanced_text['label_encoded'] = getting_encoded_author_vector(kf_balanced_text, dic_author_mappingping)[:, 0]
kf_balanced_text.sip("author", axis=1, inplace=True)
# Print author mappingping in file
original_standardout = sys.standardout
with open('author_mappingping.txt', 'w') as f:
sys.standardout = f
print(dic_author_mappingping)
sys.standardout = original_standardout
for i in range(0, length(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return kf_balanced_text
# Feature extraction of the feature described in chapter 5.6.1
def getting_bow_matrix(par_kf):
nlp = spacy.load("de_core_news_sm")
d_bow = {}
d_bow_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
for word in tokens:
try:
d_bow["bow:"+word.lemma_.lower()] += 1
except KeyError:
d_bow["bow:"+word.lemma_.lower()] = 1
d_bow_list.adding(clone.deepclone(d_bow))
d_bow.clear()
return mk.KnowledgeFrame(d_bow_list)
# Feature extraction of the feature described in chapter 5.6.2
def getting_word_n_grams(par_kf, n):
nlp = spacy.load("de_core_news_sm")
d_word_ngram = {}
d_word_ngram_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
tokens = [token.lemma_.lower() for token in tokens]
for w in range(0, length(tokens)):
if w + n <= length(tokens):
try:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] += 1
except KeyError:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] = 1
d_word_ngram_list.adding(clone.deepclone(d_word_ngram))
d_word_ngram.clear()
return mk.KnowledgeFrame(d_word_ngram_list)
# Feature extraction of the feature described in chapter 5.6.3
def getting_word_count(par_kf):
arr_wordcount = np.zeros((length(par_kf), 1))
nlp = spacy.load("de_core_news_sm")
only_words = []
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space:
only_words.adding(t)
arr_wordcount[index] = length(only_words)
only_words.clear()
return mk.KnowledgeFrame(data=arr_wordcount, columns=["word_count"])
# Feature extraction of the feature described in chapter 5.6.4 with some variations
# Count total_all word lengthgths indivisionidutotal_ally
def getting_word_lengthgth_matrix(par_kf):
nlp = spacy.load("de_core_news_sm")
d_word_length = {}
d_word_length_list = []
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit]
for word in tokens:
try:
d_word_length["w_length:"+str(length(word.text))] += 1
except KeyError:
d_word_length["w_length:"+str(length(word.text))] = 1
d_word_length_list.adding(clone.deepclone(d_word_length))
d_word_length.clear()
return mk.KnowledgeFrame(d_word_length_list)
# Count word lengthgths and set 2 intervals
def getting_word_lengthgth_matrix_with_interval(par_kf, border_1, border_2):
arr_wordcount_with_interval = np.zeros((length(par_kf), border_1 + 2))
nlp = spacy.load("de_core_news_sm")
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for word in tokens:
if length(word.text) <= border_1 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, length(word.text) - 1] += 1
elif border_1 < length(
word.text) <= border_2 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -2] += 1
elif not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_lengthgth_labels = [str(i) for i in range(1, border_1+1)]
word_lengthgth_labels.adding(f"{border_1+1}-{border_2}")
word_lengthgth_labels.adding(f">{border_2}")
return mk.KnowledgeFrame(data=arr_wordcount_with_interval, columns=word_lengthgth_labels)
# Count word lengthgths and total_sum total_all above a defined margin
def getting_word_lengthgth_matrix_with_margin(par_kf, par_margin):
arr_wordcount_with_interval = np.zeros((length(par_kf), par_margin + 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for word in tokens:
if length(word.text) <= par_margin and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, length(word.text) - 1] += 1
elif par_margin < length(word.text) and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_lengthgth_labels = [str(i) for i in range(1, par_margin+1)]
word_lengthgth_labels.adding(f">{par_margin}")
return mk.KnowledgeFrame(data=arr_wordcount_with_interval, columns=word_lengthgth_labels)
# Count the average word lengthgth of the article
def getting_average_word_lengthgth(par_kf):
arr_avg_word_length_vector = np.zeros((length(par_kf), 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_kf.traversal():
symbol_total_sum = 0
words = 0
tokens = nlp(row['text'])
for word in tokens:
if not word.is_punct and not word.is_space and not word.is_digit:
symbol_total_sum += length(word.text)
words += 1
arr_avg_word_length_vector[index, 0] = symbol_total_sum / words
return mk.KnowledgeFrame(data=arr_avg_word_length_vector, columns=["avg_word_lengthgth"])
# Feature extraction of the feature described in chapter 5.6.5
def getting_yules_k(par_kf):
d = {}
nlp = spacy.load("de_core_news_sm")
arr_yulesk = np.zeros((length(par_kf), 1))
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space and not t.is_digit:
w = t.lemma_.lower()
try:
d[w] += 1
except KeyError:
d[w] = 1
s1 = float(length(d))
s2 = total_sum([length(list(g)) * (freq ** 2) for freq, g in grouper(sorted(d.values()))])
try:
k = 10000 * (s2 - s1) / (s1 * s1)
arr_yulesk[index] = k
except ZeroDivisionError:
pass
d.clear()
return mk.KnowledgeFrame(data=arr_yulesk, columns=["yulesk"])
# Feature extraction of the feature described in chapter 5.6.6
# Get a vector of total_all special characters
def getting_special_char_label_vector(par_kf):
nlp = spacy.load("de_core_news_sm")
special_char_label_vector = []
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.is_punct and c.text not in special_char_label_vector:
special_char_label_vector.adding(c.text)
return special_char_label_vector
# Get a matrix of total_all special character by a given vector of special chars
def getting_special_char_matrix(par_kf, par_special_char_label_vector):
nlp = spacy.load("de_core_news_sm")
arr_special_char = np.zeros((length(par_kf), length(par_special_char_label_vector)))
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.text in par_special_char_label_vector:
arr_special_char[index, par_special_char_label_vector.index(c.text)] += 1
return arr_special_char
# Feature extraction of the feature described in chapter 5.6.7
# Get the char-affix-n-grams by a defined n
def getting_char_affix_n_grams(par_kf, n):
d_prefix_list, d_suffix_list, d_space_prefix_list, d_space_suffix_list = [], [], [], []
d_prefix, d_suffix, d_space_prefix, d_space_suffix = {}, {}, {}, {}
nlp = spacy.load("de_core_news_sm")
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for w in range(0, length(tokens)):
# Prefix
if length(tokens[w].text) >= n + 1:
try:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] += 1
except KeyError:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] = 1
# Suffix
if length(tokens[w].text) >= n + 1:
try:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] += 1
except KeyError:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] = 1
d_prefix_list.adding(clone.deepclone(d_prefix))
d_suffix_list.adding(clone.deepclone(d_suffix))
d_prefix.clear()
d_suffix.clear()
for i in range(0, length(row['text'])):
if row['text'][i] == " " and i + n <= length(row['text']) and i - n >= 0:
# Space-prefix
try:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] += 1
except KeyError:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] = 1
# Space-suffix
try:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] += 1
except KeyError:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] = 1
d_space_prefix_list.adding(clone.deepclone(d_space_prefix))
d_space_suffix_list.adding(clone.deepclone(d_space_suffix))
d_space_prefix.clear()
d_space_suffix.clear()
kf_pre = mk.KnowledgeFrame(d_prefix_list)
kf_su = mk.KnowledgeFrame(d_suffix_list)
kf_s_pre = mk.KnowledgeFrame(d_space_prefix_list)
kf_s_su = mk.KnowledgeFrame(d_space_suffix_list)
kf_affix = mk.concating([kf_pre, kf_su, kf_s_pre, kf_s_su], axis=1)
return kf_affix
# Get the char-word-n-grams by a defined n
def getting_char_word_n_grams(par_kf, n):
d_whole_word_list, d_mid_word_list, d_multi_word_list = [], [], []
d_whole_word, d_mid_word, d_multi_word = {}, {}, {}
match_list = []
nlp = spacy.load("de_core_news_sm")
for index, row in par_kf.traversal():
tokens = nlp(row['text'])
for w in range(0, length(tokens)):
# Whole-word
if length(tokens[w].text) == n:
try:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] += 1
except KeyError:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] = 1
# Mid-word
if length(tokens[w].text) >= n + 2:
for i in range(1, length(tokens[w].text) - n):
try:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] += 1
except KeyError:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] = 1
d_whole_word_list.adding(clone.deepclone(d_whole_word))
d_mid_word_list.adding(clone.deepclone(d_mid_word))
d_whole_word.clear()
d_mid_word.clear()
# Multi-word
# ignore special character
trimmed_text = re.sub(r'[\s]+', ' ', re.sub(r'[^\w ]+', '', row['text']))
match_list.clear()
for i in range(1, n - 1):
regex = r"\w{" + str(i) + r"}\s\w{" + str(n - 1 - i) + r"}"
match_list += re.findtotal_all(regex, trimmed_text.lower())
for match in match_list:
try:
d_multi_word["c" + str(n) + "_mw: " + match] += 1
except KeyError:
d_multi_word["c" + str(n) + "_mw: " + match] = 1
d_multi_word_list.adding(clone.deepclone(d_multi_word))
d_multi_word.clear()
kf_ww = mk.KnowledgeFrame(d_whole_word_list)
kf_miw = mk.KnowledgeFrame(d_mid_word_list)
kf_mw = | mk.KnowledgeFrame(d_multi_word_list) | pandas.DataFrame |
from __future__ import divisionision
import configparser
import logging
import os
import re
import time
from collections import OrderedDict
import numpy as np
import monkey as mk
import scipy.interpolate as itp
from joblib import Partotal_allel
from joblib import delayed
from matplotlib import pyplot as plt
from pyplanscoring.core.dicomparser import ScoringDicomParser
from pyplanscoring.core.dosimetric import read_scoring_criteria, constrains, Competition2016
from pyplanscoring.core.dvhcalculation import Structure, prepare_dvh_data, calc_dvhs_upsample_by_numd, save_dicom_dvhs, load
from pyplanscoring.core.dvhdoses import getting_dvh_getting_max
from pyplanscoring.core.geometry import getting_axis_grid, getting_interpolated_structure_planes
from pyplanscoring.core.scoring import DVHMetrics, Scoring, Participant
# TODO extract constrains from analytical curves
class CurveCompare(object):
"""
Statistical analysis of the DVH volume (%) error histograms. volume (cm 3 ) differences (numerical–analytical)
were calculated for points on the DVH curve sample_by_numd at every 10 cGy then normalized to
the structure's total volume (cm 3 ) to give the error in volume (%)
"""
def __init__(self, a_dose, a_dvh, calc_dose, calc_dvh, structure_name='', dose_grid='', gradient=''):
self.calc_data = ''
self.ref_data = ''
self.a_dose = a_dose
self.a_dvh = a_dvh
self.cal_dose = calc_dose
self.calc_dvh = calc_dvh
self.sampling_size = 10/100.0
self.dose_sample_by_nums = np.arange(0, length(calc_dvh)/100, self.sampling_size) # The DVH curve sample_by_numd at every 10 cGy
self.ref_dvh = itp.interp1d(a_dose, a_dvh, fill_value='extrapolate')
self.calc_dvh = itp.interp1d(calc_dose, calc_dvh, fill_value='extrapolate')
self.delta_dvh = self.calc_dvh(self.dose_sample_by_nums) - self.ref_dvh(self.dose_sample_by_nums)
self.delta_dvh_pp = (self.delta_dvh / a_dvh[0]) * 100
# prepare data dict
# self.calc_dvh_dict = _prepare_dvh_data(self.dose_sample_by_nums, self.calc_dvh(self.dose_sample_by_nums))
# self.ref_dvh_dict = _prepare_dvh_data(self.dose_sample_by_nums, self.ref_dvh(self.dose_sample_by_nums))
# title data
self.structure_name = structure_name
self.dose_grid = dose_grid
self.gradient = gradient
def stats(self):
kf = mk.KnowledgeFrame(self.delta_dvh_pp, columns=['delta_pp'])
print(kf.describe())
@property
def stats_paper(self):
stats = {}
stats['getting_min'] = self.delta_dvh_pp.getting_min().value_round(1)
stats['getting_max'] = self.delta_dvh_pp.getting_max().value_round(1)
stats['average'] = self.delta_dvh_pp.average().value_round(1)
stats['standard'] = self.delta_dvh_pp.standard(ddof=1).value_round(1)
return stats
@property
def stats_delta_cc(self):
stats = {}
stats['getting_min'] = self.delta_dvh.getting_min().value_round(1)
stats['getting_max'] = self.delta_dvh.getting_max().value_round(1)
stats['average'] = self.delta_dvh.average().value_round(1)
stats['standard'] = self.delta_dvh.standard(ddof=1).value_round(1)
return stats
# def getting_constrains(self, constrains_dict):
# ref_constrains = eval_constrains_dict(self.ref_dvh_dict, constrains_dict)
# calc_constrains = eval_constrains_dict(self.calc_dvh_dict, constrains_dict)
#
# return ref_constrains, calc_constrains
def eval_range(self, lim=0.2):
t1 = self.delta_dvh < -lim
t2 = self.delta_dvh > lim
ok = np.total_sum(np.logical_or(t1, t2))
pp = ok / length(self.delta_dvh) * 100
print('pp %1.2f - %i of %i ' % (pp, ok, self.delta_dvh.size))
def plot_results(self, ref_label, calc_label, title):
fig, ax = plt.subplots()
ref = self.ref_dvh(self.dose_sample_by_nums)
calc = self.calc_dvh(self.dose_sample_by_nums)
ax.plot(self.dose_sample_by_nums, ref, label=ref_label)
ax.plot(self.dose_sample_by_nums, calc, label=calc_label)
ax.set_ylabel('volume [cc]')
ax.set_xlabel('Dose [Gy]')
ax.set_title(title)
ax.legend(loc='best')
def test_real_dvh():
rs_file = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RS.1.2.246.352.71.4.584747638204.248648.20170123083029.dcm'
rd_file = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RD.1.2.246.352.71.7.584747638204.1750110.20170123082607.dcm'
rp = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/RP.1.2.246.352.71.5.584747638204.952069.20170122155706.dcm'
# dvh_file = r'/media/victor/TOURO Mobile/COMPETITION 2017/Send to Victor - Jan10 2017/Norm Res with CT Images/RD.1.2.246.352.71.7.584747638204.1746016.20170110164605.dvh'
f = r'/home/victor/Dropbox/Plan_Competition_Project/competition_2017/All Required Files - 23 Jan2017/PlanIQ Criteria TPS PlanIQ matched str names - TXT Fromat - Last mod Jan23.txt'
constrains_total_all, scores_total_all, criteria = read_scoring_criteria(f)
dose = ScoringDicomParser(filengthame=rd_file)
struc = ScoringDicomParser(filengthame=rs_file)
structures = struc.GetStructures()
ecl_DVH = dose.GetDVHs()
plt.style.use('ggplot')
st = time.time()
dvhs = {}
for structure in structures.values():
for end_cap in [False]:
if structure['id'] in ecl_DVH:
# if structure['id'] in [37, 38]:
if structure['name'] in list(scores_total_all.keys()):
ecl_dvh = ecl_DVH[structure['id']]['data']
ecl_dgetting_max = ecl_DVH[structure['id']]['getting_max'] * 100 # to cGy
struc_teste = Structure(structure, end_cap=end_cap)
# struc['planes'] = struc_teste.planes
# dicompyler_dvh = getting_dvh(structure, dose)
fig, ax = plt.subplots()
fig.set_figheight(12)
fig.set_figwidth(20)
dhist, chist = struc_teste.calculate_dvh(dose, up_sample_by_num=True)
getting_max_dose = getting_dvh_getting_max(chist)
ax.plot(dhist, chist, label='Up sample_by_numd - Dgetting_max: %1.1f cGy' % getting_max_dose)
fig.hold(True)
ax.plot(ecl_dvh, label='Eclipse - Dgetting_max: %1.1f cGy' % ecl_dgetting_max)
dvh_data = prepare_dvh_data(dhist, chist)
txt = structure['name'] + ' volume (cc): %1.1f - end_cap: %s ' % (
ecl_dvh[0], str(end_cap))
ax.set_title(txt)
# nup = getting_dvh_getting_max(dicompyler_dvh['data'])
# plt.plot(dicompyler_dvh['data'], label='Software DVH - Dgetting_max: %1.1f cGy' % nup)
ax.legend(loc='best')
ax.set_xlabel('Dose (cGy)')
ax.set_ylabel('volume (cc)')
fname = txt + '.png'
fig.savefig(fname, formating='png', dpi=100)
dvhs[structure['name']] = dvh_data
end = time.time()
print('Total elapsed Time (getting_min): ', (end - st) / 60)
def test_spacing(root_path):
"""
# TEST PLANIQ RS-DICOM DATA if z planes are not equal spaced.
:param root_path: root path
"""
root_path = r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/STRUCTURES'
structure_files = [os.path.join(root, name) for root, dirs, files in os.walk(root_path) for name in files if
name.endswith(('.dcm', '.DCM'))]
eps = 0.001
test_result = {}
for f in structure_files:
structures = ScoringDicomParser(filengthame=f).GetStructures()
for key in structures:
try:
total_all_z = np.array([z for z in structures[key]['planes'].keys()], dtype=float)
total_all_sorted_diff = np.diff(np.sort(total_all_z))
test = (abs((total_all_sorted_diff - total_all_sorted_diff[0])) > eps).whatever()
test_result[structures[key]['name']] = test
except:
print('Error in key:', key)
b = {key: value for key, value in test_result.items() if value == True}
return test_result
def test_planes_spacing(sPlanes):
eps = 0.001
total_all_z = np.array([z for z in sPlanes], dtype=float)
total_all_sorted_diff = np.diff(np.sort(total_all_z))
test = (abs((total_all_sorted_diff - total_all_sorted_diff[0])) > eps).whatever()
return test, total_all_sorted_diff
def test_upsample_by_numd_z_spacing(sPlanes):
z = 0.1
ordered_keys = [z for z, sPlane in sPlanes.items()]
ordered_keys.sort(key=float)
ordered_planes = np.array(ordered_keys, dtype=float)
z_interp_positions, dz = getting_axis_grid(z, ordered_planes)
hi_res_structure = getting_interpolated_structure_planes(sPlanes, z_interp_positions)
ordered_keys = [z for z, sPlane in hi_res_structure.items()]
ordered_keys.sort(key=float)
t, p = test_planes_spacing(hi_res_structure)
assert t is False
def eval_constrains_dict(dvh_data_tmp, constrains_dict):
mtk = DVHMetrics(dvh_data_tmp)
values_tmp = OrderedDict()
for ki in constrains_dict.keys():
cti = mtk.eval_constrain(ki, constrains_dict[ki])
values_tmp[ki] = cti
return values_tmp
def getting_analytical_curve(an_curves_obj, file_structure_name, column):
an_curve_i = an_curves_obj[file_structure_name.split('_')[0]]
dose_an = an_curve_i['Dose (cGy)'].values
an_dvh = an_curve_i[column].values # check nonzero
idx = np.nonzero(an_dvh) # remove 0 volumes from DVH
dose_range, cdvh = dose_an[idx], an_dvh[idx]
return dose_range, cdvh
def calc_data(row, dose_files_dict, structure_dict, constrains, calculation_options):
idx, values = row[0], row[1]
s_name = values['Structure name']
voxel = str(values['Dose Voxel (mm)'])
gradient = values['Gradient direction']
dose_file = dose_files_dict[gradient][voxel]
struc_file = structure_dict[s_name]
# getting structure and dose
dicom_dose = ScoringDicomParser(filengthame=dose_file)
struc = ScoringDicomParser(filengthame=struc_file)
structures = struc.GetStructures()
structure = structures[2]
# set end cap by 1/2 slice thickness
calculation_options['end_cap'] = structure['thickness'] / 2.0
# set up sample_by_numd structure
struc_teste = Structure(structure, calculation_options)
dhist, chist = struc_teste.calculate_dvh(dicom_dose)
dvh_data = struc_teste.getting_dvh_data()
# Setup DVH metrics class and getting DVH DATA
metrics = DVHMetrics(dvh_data)
values_constrains = OrderedDict()
for k in constrains.keys():
ct = metrics.eval_constrain(k, constrains[k])
values_constrains[k] = ct
values_constrains['Gradient direction'] = gradient
# Get data
return mk.Collections(values_constrains, name=voxel), s_name
def calc_data_total_all(row, dose_files_dict, structure_dict, constrains, an_curves, col_grad_dict, delta_mm=(0.2, 0.2, 0.2),
end_cap=True, up_sample_by_num=True):
idx, values = row[0], row[1]
s_name = values['Structure name']
voxel = str(values['Dose Voxel (mm)'])
gradient = values['Gradient direction']
dose_file = dose_files_dict[gradient][voxel]
struc_file = structure_dict[s_name]
# getting structure and dose
dicom_dose = ScoringDicomParser(filengthame=dose_file)
struc = ScoringDicomParser(filengthame=struc_file)
structures = struc.GetStructures()
structure = structures[2]
# set up sample_by_numd structure
struc_teste = Structure(structure)
struc_teste.set_delta(delta_mm)
dhist, chist = struc_teste.calculate_dvh(dicom_dose)
# getting its columns from spreadsheet
column = col_grad_dict[gradient][voxel]
adose_range, advh = getting_analytical_curve(an_curves, s_name, column)
# use CurveCompare class to eval similarity from calculated and analytical curves
cmp = CurveCompare(adose_range, advh, dhist, chist, s_name, voxel, gradient)
ref_constrains, calc_constrains = cmp.getting_constrains(constrains)
ref_constrains['Gradient direction'] = gradient
calc_constrains['Gradient direction'] = gradient
ref_collections = mk.Collections(ref_constrains, name=voxel)
calc_collections = mk.Collections(calc_constrains, name=voxel)
return ref_collections, calc_collections, s_name, cmp
def test11(delta_mm=(0.2, 0.2, 0.1), plot_curves=False):
# TEST DICOM DATA
structure_files = ['/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Spheres/Sphere_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cylinders/Cylinder_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cylinders/RtCylinder_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cones/Cone_02_0.dcm',
'/home/victor/Downloads/DVH-Analysis-Data-Etc/STRUCTURES/Cones/RtCone_02_0.dcm']
structure_name = ['Sphere_02_0', 'Cylinder_02_0', 'RtCylinder_02_0', 'Cone__02_0', 'RtCone_02_0']
dose_files = [
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_0-4_0-2_0-4_mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_1mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_2mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_AntPost_3mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_0-4_0-2_0-4_mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_1mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_2mm_Aligned.dcm',
r'/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS/Linear_SupInf_3mm_Aligned.dcm']
# Structure Dict
structure_dict = dict(zip(structure_name, structure_files))
# dose dict
dose_files_dict = {
'Z(AP)': {'0.4x0.2x0.4': dose_files[0], '1': dose_files[1], '2': dose_files[2], '3': dose_files[3]},
'Y(SI)': {'0.4x0.2x0.4': dose_files[4], '1': dose_files[5], '2': dose_files[6], '3': dose_files[7]}}
sheets = ['Sphere', 'Cylinder', 'RtCylinder', 'Cone', 'RtCone']
col_grad_dict = {'Z(AP)': {'0.4x0.2x0.4': 'AP 0.2 mm', '1': 'AP 1 mm', '2': 'AP 2 mm', '3': 'AP 3 mm'},
'Y(SI)': {'0.4x0.2x0.4': 'SI 0.2 mm', '1': 'SI 1 mm', '2': 'SI 2 mm', '3': 'SI 3 mm'}}
# grab analytical data
sheet = 'Analytical'
ref_path = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_data.xlsx'
kf = mk.read_excel(ref_path, sheetname=sheet)
mask = kf['CT slice spacing (mm)'] == '0.2mm'
kf = kf.loc[mask]
# Constrains to getting data
# Constrains
constrains = OrderedDict()
constrains['Total_Volume'] = True
constrains['getting_min'] = 'getting_min'
constrains['getting_max'] = 'getting_max'
constrains['average'] = 'average'
constrains['D99'] = 99
constrains['D95'] = 95
constrains['D5'] = 5
constrains['D1'] = 1
constrains['Dcc'] = 0.03
# Get total_all analytical curves
out = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_dvh.obj'
an_curves = load(out)
res = Partotal_allel(n_jobs=-1, verbose=11)(
delayed(calc_data_total_all)(row,
dose_files_dict,
structure_dict,
constrains,
an_curves,
col_grad_dict,
delta_mm=delta_mm) for row in kf.traversal())
ref_results = [d[0] for d in res]
calc_results = [d[1] for d in res]
sname = [d[2] for d in res]
curves = [d[3] for d in res]
kf_ref_results = mk.concating(ref_results, axis=1).T.reseting_index()
kf_calc_results = mk.concating(calc_results, axis=1).T.reseting_index()
kf_ref_results['Structure name'] = sname
kf_calc_results['Structure name'] = sname
ref_num = kf_ref_results[kf_ref_results.columns[1:-2]]
calc_num = kf_calc_results[kf_calc_results.columns[1:-2]]
delta = ((calc_num - ref_num) / ref_num) * 100
res = OrderedDict()
lim = 3
for col in delta:
count = np.total_sum(np.abs(delta[col]) > lim)
rg = np.array([value_round(delta[col].getting_min(), 2), value_round(delta[col].getting_max(), 2)])
res[col] = {'count': count, 'range': rg}
test_table = mk.KnowledgeFrame(res).T
print(test_table)
if plot_curves:
for c in curves:
c.plot_results()
plt.show()
def test22(delta_mm=(0.1, 0.1, 0.1), up_sample_by_num=True, plot_curves=True):
ref_data = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_data.xlsx'
struc_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/STRUCTURES'
dose_grid_dir = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/DVH-Analysis-Data-Etc/DOSE GRIDS'
#
# ref_data = r'D:\Dropbox\Plan_Competit
st = 2
snames = ['Sphere_10_0', 'Sphere_20_0', 'Sphere_30_0',
'Cylinder_10_0', 'Cylinder_20_0', 'Cylinder_30_0',
'RtCylinder_10_0', 'RtCylinder_20_0', 'RtCylinder_30_0',
'Cone_10_0', 'Cone_20_0', 'Cone_30_0',
'RtCone_10_0', 'RtCone_20_0', 'RtCone_30_0']
structure_path = [os.path.join(struc_dir, f + '.dcm') for f in snames]
structure_dict = dict(zip(snames, structure_path))
dose_files = [os.path.join(dose_grid_dir, f) for f in [
'Linear_AntPost_1mm_Aligned.dcm',
'Linear_AntPost_2mm_Aligned.dcm',
'Linear_AntPost_3mm_Aligned.dcm',
'Linear_SupInf_1mm_Aligned.dcm',
'Linear_SupInf_2mm_Aligned.dcm',
'Linear_SupInf_3mm_Aligned.dcm']]
# dose dict
dose_files_dict = {
'Z(AP)': {'1': dose_files[0], '2': dose_files[1], '3': dose_files[2]},
'Y(SI)': {'1': dose_files[3], '2': dose_files[4], '3': dose_files[5]}}
col_grad_dict = {'Z(AP)': {'0.4x0.2x0.4': 'AP 0.2 mm', '1': 'AP 1 mm', '2': 'AP 2 mm', '3': 'AP 3 mm'},
'Y(SI)': {'0.4x0.2x0.4': 'SI 0.2 mm', '1': 'SI 1 mm', '2': 'SI 2 mm', '3': 'SI 3 mm'}}
# grab analytical data
out = '/home/victor/Dropbox/Plan_Competition_Project/pyplanscoring/testandardata/analytical_dvh.obj'
an_curves = load(out)
kf = mk.read_excel(ref_data, sheetname='Analytical')
kfi = kf.ix[40:]
mask0 = kfi['Structure Shift'] == 0
kfi = kfi.loc[mask0]
# Constrains to getting data
# Constrains
constrains = OrderedDict()
constrains['Total_Volume'] = True
constrains['getting_min'] = 'getting_min'
constrains['getting_max'] = 'getting_max'
constrains['average'] = 'average'
constrains['D99'] = 99
constrains['D95'] = 95
constrains['D5'] = 5
constrains['D1'] = 1
constrains['Dcc'] = 0.03
# GET CALCULATED DATA
# backend = 'threading'
res = Partotal_allel(n_jobs=-1, verbose=11)(
delayed(calc_data_total_all)(row,
dose_files_dict,
structure_dict,
constrains,
an_curves,
col_grad_dict,
delta_mm=delta_mm,
up_sample_by_num=up_sample_by_num) for row in kfi.traversal())
ref_results = [d[0] for d in res]
calc_results = [d[1] for d in res]
sname = [d[2] for d in res]
curves = [d[3] for d in res]
kf_ref_results = mk.concating(ref_results, axis=1).T.reseting_index()
kf_calc_results = mk.concating(calc_results, axis=1).T.reseting_index()
kf_ref_results['Structure name'] = sname
kf_calc_results['Structure name'] = sname
ref_num = kf_ref_results[kf_ref_results.columns[1:-2]]
calc_num = kf_calc_results[kf_calc_results.columns[1:-2]]
delta = ((calc_num - ref_num) / ref_num) * 100
res = OrderedDict()
lim = 3
for col in delta:
count = np.total_sum(np.abs(delta[col]) > lim)
rg = np.array([value_round(delta[col].getting_min(), 2), value_round(delta[col].getting_max(), 2)])
res[col] = {'count': count, 'range': rg}
test_table = | mk.KnowledgeFrame(res) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD
"""
Toolset working with yahoo finance data
Module includes functions for easy access to YahooFinance data
"""
import urllib.request
import numpy as np
import requests # interaction with the web
import os # file system operations
import yaml # human-friendly data formating
import re # regular expressions
import monkey as mk # monkey... the best time collections library out there
import datetime as dt # date and time functions
import io
from .extra import ProgressBar
dateTimeFormat = "%Y%m%d %H:%M:%S"
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
def gettingQuote(symbols):
"""
getting current yahoo quote
Parameters
-----------
symbols : list of str
list of ticker symbols
Returns
-----------
KnowledgeFrame , data is row-wise
"""
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not incontainstance(symbols,list):
symbols = [symbols]
header_numer = ['symbol','final_item','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(list(zip(header_numer,[[] for i in range(length(header_numer))])))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib.request.urlopen(urlStr).readlines()
except Exception as e:
s = "Failed to download:\n{0}".formating(e);
print(s)
for line in lines:
fields = line.decode().strip().split(',')
#print fields, length(fields)
for i,field in enumerate(fields):
data[header_numer[i]].adding( parseStr(field))
idx = data.pop('symbol')
return | mk.KnowledgeFrame(data,index=idx) | pandas.DataFrame |
from __future__ import divisionision
from functools import wraps
import monkey as mk
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventutotal_ally be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = mk.Collections([], dtype='object')
self.com_name = mk.Collections([], dtype='object')
self.taxa = mk.Collections([], dtype='object')
self.order = mk.Collections([], dtype='object')
self.usfws_id = mk.Collections([], dtype='object')
self.body_wgt = mk.Collections([], dtype='object')
self.diet_item = mk.Collections([], dtype='object')
self.h2o_cont = mk.Collections([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replacingd with
# a method to access a SQL database containing the properties
#filengthame = './ted/tests/TEDSpeciesProperties.csv'
filengthame = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filengthame,'rt') as csvfile:
# csv.DictReader uses first line in file for column header_numings by default
dr = mk.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' (filengthame, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Monkey KnowledgeFrame
self.chemical_name = mk.Collections([], dtype="object", name="chemical_name")
# application parameters for getting_min/getting_max application scenarios
self.crop_getting_min = mk.Collections([], dtype="object", name="crop")
self.app_method_getting_min = mk.Collections([], dtype="object", name="app_method_getting_min")
self.app_rate_getting_min = mk.Collections([], dtype="float", name="app_rate_getting_min")
self.num_apps_getting_min = mk.Collections([], dtype="int", name="num_apps_getting_min")
self.app_interval_getting_min = mk.Collections([], dtype="int", name="app_interval_getting_min")
self.siplet_spec_getting_min = mk.Collections([], dtype="object", name="siplet_spec_getting_min")
self.boom_hgt_getting_min = mk.Collections([], dtype="object", name="siplet_spec_getting_min")
self.pest_incorp_depth_getting_min = mk.Collections([], dtype="object", name="pest_incorp_depth")
self.crop_getting_max = mk.Collections([], dtype="object", name="crop")
self.app_method_getting_max = mk.Collections([], dtype="object", name="app_method_getting_max")
self.app_rate_getting_max = mk.Collections([], dtype="float", name="app_rate_getting_max")
self.num_apps_getting_max = mk.Collections([], dtype="int", name="num_app_getting_maxs")
self.app_interval_getting_max = mk.Collections([], dtype="int", name="app_interval_getting_max")
self.siplet_spec_getting_max = mk.Collections([], dtype="object", name="siplet_spec_getting_max")
self.boom_hgt_getting_max = mk.Collections([], dtype="object", name="siplet_spec_getting_max")
self.pest_incorp_depth_getting_max = mk.Collections([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = mk.Collections([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = mk.Collections([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = mk.Collections([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = mk.Collections([], dtype="float", name="frac_retained_birds")
self.log_kow = mk.Collections([], dtype="float", name="log_kow")
self.koc = mk.Collections([], dtype="float", name="koc")
self.solubility = mk.Collections([], dtype="float", name="solubility")
self.henry_law_const = mk.Collections([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_average = mk.Collections([], dtype="float", name="aq_plant_algae_bcf_average")
self.aq_plant_algae_bcf_upper = mk.Collections([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_average = mk.Collections([], dtype="float", name="inv_bcf_average")
self.inv_bcf_upper = mk.Collections([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_average = mk.Collections([], dtype="float", name="fish_bcf_average")
self.fish_bcf_upper = mk.Collections([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = mk.Collections([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = mk.Collections([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# nagetting_ming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = mk.Collections([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_rat_derm_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = mk.Collections([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = mk.Collections([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = mk.Collections([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = mk.Collections([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = mk.Collections([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = mk.Collections([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = mk.Collections([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = mk.Collections([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = mk.Collections([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = mk.Collections([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = mk.Collections([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = mk.Collections([], dtype="float", name="dbt_bird_sub_indirect")
self.getting_mineau_sca_fact = mk.Collections([], dtype="float", name="getting_mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = mk.Collections([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.getting_mineau_sca_fact_wgt = mk.Collections([], dtype="float", name="getting_mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = mk.Collections([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = mk.Collections([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = mk.Collections([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = mk.Collections([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = mk.Collections([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = mk.Collections([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = mk.Collections([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = mk.Collections([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = mk.Collections([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = mk.Collections([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = mk.Collections([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = mk.Collections([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = mk.Collections([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = mk.Collections([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = mk.Collections([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = mk.Collections([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = mk.Collections([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = mk.Collections([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = mk.Collections([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = mk.Collections([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = mk.Collections([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = mk.Collections([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = mk.Collections([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = mk.Collections([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = mk.Collections([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = mk.Collections([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = mk.Collections([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = mk.Collections([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = mk.Collections([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = mk.Collections([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = mk.Collections([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = mk.Collections([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = mk.Collections([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = mk.Collections([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = mk.Collections([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = mk.Collections([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = mk.Collections([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = mk.Collections([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = mk.Collections([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = mk.Collections([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = mk.Collections([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = mk.Collections([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = mk.Collections([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = mk.Collections([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = mk.Collections([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = mk.Collections([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = mk.Collections([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = mk.Collections([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = mk.Collections([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = mk.Collections([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = mk.Collections([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = mk.Collections([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = mk.Collections([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = mk.Collections([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = mk.Collections([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = mk.Collections([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort = mk.Collections([], dtype="float", name="arbt_mamm_mort")
self.arbt_mamm_growth = mk.Collections([], dtype="float", name="arbt_mamm_growth")
self.arbt_mamm_repro = mk.Collections([], dtype="float", name="arbt_mamm_repro")
self.arbt_mamm_behav = mk.Collections([], dtype="float", name="arbt_mamm_behav")
self.arbt_mamm_sensory = mk.Collections([], dtype="float", name="arbt_mamm_sensory")
# application rate-based toxicity (arbt) : birds (lbs active ingredient/Acre)
self.arbt_bird_mort = mk.Collections([], dtype="float", name="arbt_bird_mort")
self.arbt_bird_growth = mk.Collections([], dtype="float", name="arbt_bird_growth")
self.arbt_bird_repro = mk.Collections([], dtype="float", name="arbt_bird_repro")
self.arbt_bird_behav = mk.Collections([], dtype="float", name="arbt_bird_behav")
self.arbt_bird_sensory = mk.Collections([], dtype="float", name="arbt_bird_sensory")
# application rate-based toxicity (arbt) : reptiles (lbs active ingredient/Acre)
self.arbt_reptile_mort = mk.Collections([], dtype="float", name="arbt_reptile_mort")
self.arbt_reptile_growth = mk.Collections([], dtype="float", name="arbt_reptile_growth")
self.arbt_reptile_repro = mk.Collections([], dtype="float", name="arbt_reptile_repro")
self.arbt_reptile_behav = mk.Collections([], dtype="float", name="arbt_reptile_behav")
self.arbt_reptile_sensory = mk.Collections([], dtype="float", name="arbt_reptile_sensory")
# application rate-based toxicity (arbt) : invertebrates (lbs active ingredient/Acre)
self.arbt_inv_1inmill_mort = mk.Collections([], dtype="float", name="arbt_inv_1inmill_mort")
self.arbt_inv_1inten_mort = mk.Collections([], dtype="float", name="arbt_inv_1inten_mort")
self.arbt_inv_sub_direct = mk.Collections([], dtype="float", name="arbt_inv_sub_direct")
self.arbt_inv_sub_indirect = mk.Collections([], dtype="float", name="arbt_inv_sub_indirect")
self.arbt_inv_growth = mk.Collections([], dtype="float", name="arbt_inv_growth")
self.arbt_inv_repro = mk.Collections([], dtype="float", name="arbt_inv_repro")
self.arbt_inv_behav = mk.Collections([], dtype="float", name="arbt_inv_behav")
self.arbt_inv_sensory = | mk.Collections([], dtype="float", name="arbt_inv_sensory") | pandas.Series |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import total_allocate_fips_location_system
from flowsa.location import US_FIPS
import math
import monkey as mk
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gtotal_allium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gyptotal_sum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgflat_underlyingconstruction": "2013-2017",
"sandgflat_underlyingindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
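# YEARS_COVERED maps each lowercase commodity key to the span of years available
# in its Mineral Yearbook workbook (a few table-specific entries instead list the
# years explicitly); usgs_myb_year() below uses these range strings to pick the
# matching "year_N" column.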
def usgs_myb_year(years, current_year_str):
"""
    Maps the requested year to the corresponding spreadsheet column name
    ("year_1" through "year_5") and checks that the year you picked is
    covered by the source file.
    :param years: string, year range separated by a hyphen, e.g. "2014-2018"
    :param current_year_str: string, year of interest
    :return: string, spreadsheet column name of the form "year_N"
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
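# Illustrative example of the mapping above (values chosen here, not from the
# source): usgs_myb_year("2014-2018", "2016") returns "year_3", since 2016 is
# the third year of the 2014-2018 span; a year outside the span is logged and
# the function returns None.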
def usgs_myb_name(USGS_Source):
"""
    Takes the USGS source name, extracts the commodity portion, and converts
    it to a lowercase, space-separated name used in other parts of
    Flow-By-Activity.
    :param USGS_Source: string, usgs source name
    :return: string, lowercase commodity name
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
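# Illustrative example (source name assumed for the sketch):
# usgs_myb_name("USGS_MYB_SodaAsh") splits on "_", keeps "SodaAsh", inserts a
# space before each capital letter, and lowercases the result, giving "soda ash".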
def usgs_myb_static_variables():
"""
    Populates the Flow-By-Activity data values that are the same
    for total_all of the USGS_MYB files.
    :return: dict, the shared Flow-By-Activity field values
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "gvalue_round"
data["Context"] = None
data["ActivityContotal_sumedBy"] = None
return data
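# Each record built by the *_parse functions below starts from this shared dict
# and then fills in the source-specific fields (SourceName, Year, Unit, FlowName,
# FlowAmount, Description, ActivityProducedBy).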
def usgs_myb_remove_digits(value_string):
"""
    Removes every digit character from a string.
    :param value_string: string that may contain digits
    :return: string with the digits removed
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
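# Illustrative example (label invented for the sketch):
# usgs_myb_remove_digits("Quantity3") returns "Quantity", stripping, e.g., the
# trailing footnote digits that appear in some USGS row labels.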
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replacingd with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to ctotal_all, concating, parse, formating into Flow-By-Activity
formating
"""
return [build_url]
def usgs_asbestos_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[4:11]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) > 12:
for x in range(12, length(kf_data.columns)):
col_name = "Unnamed: " + str(x)
del kf_data[col_name]
if length(kf_data. columns) == 12:
kf_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_asbestos_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:":
product = "imports"
elif kf.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(kf.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(knowledgeframe,
str(year))
return knowledgeframe
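# Minimal sketch of how a *_ctotal_all / *_parse pair fits together (normally
# flowbyactivity.py drives this; the response object and source name used here
# are hypothetical and only illustrate the data flow):
#   resp = requests.get(asbestos_url)
#   kf = usgs_asbestos_ctotal_all(resp=resp, year="2016")
#   fba = usgs_asbestos_parse(kf_list=[kf], source="USGS_MYB_Asbestos",
#                             year="2016")
# The same ctotal_all-then-parse pattern repeats for every commodity below.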
def usgs_barite_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:14]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data. columns) == 11:
kf_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_barite_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
    product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:3":
product = "imports"
elif kf.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif kf.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_bauxite_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:14]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one. columns) == 11:
kf_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_bauxite_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption, as shipped:":
prod = "import"
elif kf.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_beryllium_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_1 = mk.KnowledgeFrame(kf_raw_data_two.loc[6:9]).reindexing()
kf_data_1 = kf_data_1.reseting_index()
del kf_data_1["index"]
kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[12:12]).reindexing()
kf_data_2 = kf_data_2.reseting_index()
del kf_data_2["index"]
if length(kf_data_2.columns) > 11:
for x in range(11, length(kf_data_2.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_2[col_name]
if length(kf_data_1. columns) == 11:
kf_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if length(kf_data_2. columns) == 11:
kf_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in kf_data_1.columns:
if col not in col_to_use:
del kf_data_1[col]
for col in kf_data_2.columns:
if col not in col_to_use:
del kf_data_2[col]
frames = [kf_data_1, kf_data_2]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_beryllium_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for contotal_sumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for kf in kf_list:
for index, row in kf.traversal():
prod = "production"
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption, beryl2":
prod = "imports"
if kf.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = kf.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_boron_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data.loc[8:8]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
kf_data_two = mk.KnowledgeFrame(kf_raw_data.loc[21:22]).reindexing()
kf_data_two = kf_data_two.reseting_index()
del kf_data_two["index"]
kf_data_three = mk.KnowledgeFrame(kf_raw_data.loc[27:28]).reindexing()
kf_data_three = kf_data_three.reseting_index()
del kf_data_three["index"]
if length(kf_data_one. columns) == 11:
kf_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
kf_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
kf_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
del kf_data_two[col]
del kf_data_three[col]
frames = [kf_data_one, kf_data_two, kf_data_three]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_boron_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
    product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "B2O3 content" or \
kf.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if kf.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif kf.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_chromium_ctotal_all(*, resp, year, **_):
""""
Convert response for ctotal_alling url to monkey knowledgeframe,
begin parsing kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[4:24]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data. columns) == 12:
kf_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
elif length(kf_data. columns) == 13:
kf_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_chromium_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
    product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif kf.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_clay_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_btotal_all = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
kf_data_btotal_all = mk.KnowledgeFrame(kf_raw_data_btotal_all.loc[19:19]).reindexing()
kf_data_btotal_all = kf_data_btotal_all.reseting_index()
del kf_data_btotal_all["index"]
kf_raw_data_bentonite = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
kf_data_bentonite = mk.KnowledgeFrame(
kf_raw_data_bentonite.loc[28:28]).reindexing()
kf_data_bentonite = kf_data_bentonite.reseting_index()
del kf_data_bentonite["index"]
kf_raw_data_common = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
kf_data_common = mk.KnowledgeFrame(kf_raw_data_common.loc[40:40]).reindexing()
kf_data_common = kf_data_common.reseting_index()
del kf_data_common["index"]
kf_raw_data_fire = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
kf_data_fire = mk.KnowledgeFrame(kf_raw_data_fire.loc[12:12]).reindexing()
kf_data_fire = kf_data_fire.reseting_index()
del kf_data_fire["index"]
kf_raw_data_fuller = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
kf_data_fuller = mk.KnowledgeFrame(kf_raw_data_fuller.loc[17:17]).reindexing()
kf_data_fuller = kf_data_fuller.reseting_index()
del kf_data_fuller["index"]
kf_raw_data_kaolin = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
kf_data_kaolin = mk.KnowledgeFrame(kf_raw_data_kaolin.loc[18:18]).reindexing()
kf_data_kaolin = kf_data_kaolin.reseting_index()
del kf_data_kaolin["index"]
kf_raw_data_export = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
kf_data_export = mk.KnowledgeFrame(kf_raw_data_export.loc[6:15]).reindexing()
kf_data_export = kf_data_export.reseting_index()
del kf_data_export["index"]
kf_raw_data_import = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
kf_data_import = mk.KnowledgeFrame(kf_raw_data_import.loc[6:13]).reindexing()
kf_data_import = kf_data_import.reseting_index()
del kf_data_import["index"]
kf_data_btotal_all.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
kf_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
kf_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
kf_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
kf_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
kf_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
kf_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
kf_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
kf_data_btotal_all["type"] = "Btotal_all clay"
kf_data_bentonite["type"] = "Bentonite"
kf_data_common["type"] = "Common clay"
kf_data_fire["type"] = "Fire clay"
kf_data_fuller["type"] = "Fuller’s earth"
kf_data_kaolin["type"] = "Kaolin"
kf_data_export["type"] = "export"
kf_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in kf_data_import.columns:
if col not in col_to_use:
del kf_data_import[col]
del kf_data_export[col]
for col in kf_data_btotal_all.columns:
if col not in col_to_use:
del kf_data_btotal_all[col]
del kf_data_bentonite[col]
del kf_data_common[col]
del kf_data_fire[col]
del kf_data_fuller[col]
del kf_data_kaolin[col]
frames = [kf_data_import, kf_data_export, kf_data_btotal_all, kf_data_bentonite,
kf_data_common, kf_data_fire, kf_data_fuller, kf_data_kaolin]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
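# Sketch of the frame usgs_clay_ctotal_all hands to the parser: each of the
# eight sheets contributes rows carrying only the "Production" label, the
# single year column kept by usgs_myb_year, and a "type" tag ("Btotal_all clay",
# "Kaolin", "import", "export", ...). usgs_clay_parse keys its flow naming off
# that "type" column rather than off the row label alone.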
def usgs_clay_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Btotal_all clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificitotal_ally activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["type"].strip() == "import":
product = "imports"
elif kf.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(kf.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
kf.iloc[index]["type"].strip() + " " + product
data["Description"] = kf.iloc[index]["type"].strip()
data["ActivityProducedBy"] = kf.iloc[index]["type"].strip()
else:
data['FlowName'] = \
kf.iloc[index]["Production"].strip() + " " + product
data["Description"] = kf.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
kf.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(3)" or \
str(kf.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_cobalt_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_1 = mk.KnowledgeFrame(kf_raw_data_two.loc[6:11]).reindexing()
kf_data_1 = kf_data_1.reseting_index()
del kf_data_1["index"]
kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[23:23]).reindexing()
kf_data_2 = kf_data_2.reseting_index()
del kf_data_2["index"]
if length(kf_data_2.columns) > 11:
for x in range(11, length(kf_data_2.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_2[col_name]
if length(kf_data_1. columns) == 12:
kf_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
if length(kf_data_2. columns) == 11:
kf_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in kf_data_1.columns:
if col not in col_to_use:
del kf_data_1[col]
for col in kf_data_2.columns:
if col not in col_to_use:
del kf_data_2[col]
frames = [kf_data_1, kf_data_2]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_cobalt_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for contotal_sumption", "Exports"]
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
prod = "production"
if kf.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = kf.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(kf.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_copper_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin
parsing kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_1 = mk.KnowledgeFrame(kf_raw_data.loc[12:12]).reindexing()
kf_data_1 = kf_data_1.reseting_index()
del kf_data_1["index"]
kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[30:31]).reindexing()
kf_data_2 = kf_data_2.reseting_index()
del kf_data_2["index"]
if length(kf_data_1. columns) == 12:
kf_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
kf_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in kf_data_1.columns:
if col not in col_to_use:
del kf_data_1[col]
for col in kf_data_2.columns:
if col not in col_to_use:
del kf_data_2[col]
frames = [kf_data_1, kf_data_2]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_copper_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
remove_digits = str.maketrans('', '', digits)
product = kf.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_diatomite_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:10]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one.columns) == 10:
kf_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_diatomite_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for contotal_sumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption2":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(kf.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_feldspar_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin
parsing kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_two = mk.KnowledgeFrame(kf_raw_data_two.loc[4:8]).reindexing()
kf_data_two = kf_data_two.reseting_index()
del kf_data_two["index"]
kf_data_one = mk.KnowledgeFrame(kf_raw_data_two.loc[10:15]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_two. columns) == 13:
kf_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
kf_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in kf_data_two.columns:
if col not in col_to_use:
del kf_data_two[col]
del kf_data_one[col]
frames = [kf_data_two, kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_feldspar_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:4":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif kf.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(kf.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_fluorspar_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin
parsing kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
if year in YEARS_COVERED['fluorspar_inports']:
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
kf_raw_data_three = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
kf_raw_data_four = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[5:15]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
kf_data_two = mk.KnowledgeFrame(kf_raw_data_two.loc[7:8]).reindexing()
kf_data_three = mk.KnowledgeFrame(kf_raw_data_three.loc[19:19]).reindexing()
kf_data_four = mk.KnowledgeFrame(kf_raw_data_four.loc[11:11]).reindexing()
if length(kf_data_two.columns) == 13:
kf_data_two.columns = ["Production", "space_1", "not_1", "space_2",
"not_2", "space_3", "not_3", "space_4",
"not_4", "space_5", "year_4", "space_6",
"year_5"]
if length(kf_data_three.columns) == 9:
kf_data_three.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
kf_data_four.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
if length(kf_data_one. columns) == 13:
kf_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['fluorspar'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
if year in YEARS_COVERED['fluorspar_inports']:
for col in kf_data_two.columns:
if col not in col_to_use:
del kf_data_two[col]
for col in kf_data_three.columns:
if col not in col_to_use:
del kf_data_three[col]
for col in kf_data_four.columns:
if col not in col_to_use:
del kf_data_four[col]
kf_data_one["type"] = "data_one"
if year in YEARS_COVERED['fluorspar_inports']:
# alugetting_minum fluoride
# cryolite
kf_data_two["type"] = "data_two"
kf_data_three["type"] = "Alugetting_minum Fluoride"
kf_data_four["type"] = "Cryolite"
frames = [kf_data_one, kf_data_two, kf_data_three, kf_data_four]
else:
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
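# For years listed in YEARS_COVERED['fluorspar_inports'] the fluorspar workbook
# also contributes T2, T7 and T8 data, tagged through the "type" column so the
# parser can label the extra import flows separately ("data_two",
# "Alugetting_minum Fluoride", "Cryolite"); for every other covered year only
# the T1 frame ("data_one") is returned.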
def usgs_fluorspar_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid",
"Mettotal_allurgical", "Production"]
prod = ""
name = usgs_myb_name(source)
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports:3":
prod = "exports"
des = name
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:3":
prod = "imports"
des = name
elif kf.iloc[index]["Production"].strip() == "Fluorosilicic acid:":
prod = "production"
des = "Fluorosilicic acid:"
if str(kf.iloc[index]["type"]).strip() == "data_two":
prod = "imports"
des = kf.iloc[index]["Production"].strip()
elif str(kf.iloc[index]["type"]).strip() == \
"Alugetting_minum Fluoride" or \
str(kf.iloc[index]["type"]).strip() == "Cryolite":
prod = "imports"
des = kf.iloc[index]["type"].strip()
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year)
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_gtotal_allium_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[5:7]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) > 11:
for x in range(11, length(kf_data.columns)):
col_name = "Unnamed: " + str(x)
del kf_data[col_name]
if length(kf_data.columns) == 11:
kf_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['gtotal_allium'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_gtotal_allium_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production, primary crude", "Metal"]
    product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['gtotal_allium'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:":
product = "imports"
elif kf.iloc[index]["Production"].strip() == \
"Production, primary crude":
product = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gtotal_allium'], year)
if str(kf.iloc[index][col_name]).strip() == "--":
data["FlowAmount"] = str(0)
elif str(kf.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_garnet_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_two = mk.KnowledgeFrame(kf_raw_data_two.loc[4:5]).reindexing()
kf_data_two = kf_data_two.reseting_index()
del kf_data_two["index"]
kf_data_one = mk.KnowledgeFrame(kf_raw_data_two.loc[10:14]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one.columns) > 13:
for x in range(13, length(kf_data_one.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_one[col_name]
del kf_data_two[col_name]
if length(kf_data_two. columns) == 13:
kf_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
kf_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['garnet'], year))
for col in kf_data_two.columns:
if col not in col_to_use:
del kf_data_two[col]
del kf_data_one[col]
frames = [kf_data_two, kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_garnet_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports:2":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption: 3":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == "Crude production:":
prod = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['garnet'], year)
data["FlowAmount"] = str(kf.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_gold_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[6:14]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) == 13:
kf_data.columns = ["Production", "Space", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['gold'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_gold_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports, refined bullion",
"Imports for contotal_sumption, refined bullion"]
knowledgeframe = mk.KnowledgeFrame()
product = "production"
name = usgs_myb_name(source)
des = name
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Quantity":
product = "production"
elif kf.iloc[index]["Production"].strip() == \
"Exports, refined bullion":
product = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption, refined bullion":
product = "imports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gold'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_graphite_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[5:9]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data. columns) == 13:
kf_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_graphite_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantiy", "Quantity"]
    product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(kf.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_gyptotal_sum_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin
parsing kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:10]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one.columns) > 11:
for x in range(11, length(kf_data_one.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_one[col_name]
if length(kf_data_one.columns) == 11:
kf_data_one.columns = ["Production", "space_1", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['gyptotal_sum'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_gyptotal_sum_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Imports for contotal_sumption"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['gyptotal_sum'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_iodine_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[6:10]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data. columns) == 11:
kf_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
elif length(kf_data. columns) == 13:
kf_data.columns = ["Production", "unit", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['iodine'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_iodine_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Quantity, for contotal_sumption", "Exports2"]
    product = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Imports:2":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Production":
product = "production"
elif kf.iloc[index]["Production"].strip() == "Exports2":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_iron_ore_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:25]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data. columns) == 12:
kf_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['ironore'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_iron_ore_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Gross weight", "Quantity"]
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Production:":
product = "production"
elif kf.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:":
product = "imports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data['FlowName'] = "Iron Ore " + product
data["Description"] = "Iron Ore"
data["ActivityProducedBy"] = "Iron Ore"
col_name = usgs_myb_year(YEARS_COVERED['ironore'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_kyanite_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[4:13]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one. columns) == 12:
kf_data_one.columns = ["Production", "unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['kyanite'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_kyanite_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity2"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Exports of kyanite concentrate:3":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption, total_all kyanite getting_minerals:3":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == "Production:":
prod = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_lead_url_helper(*, year, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replacingd with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
    :param year: year
:return: list, urls to ctotal_all, concating, parse, formating into Flow-By-Activity
formating
"""
if int(year) < 2013:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'ptotal_alladium/production/atoms/files/myb1-2016-lead.xls')
elif int(year) < 2014:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'ptotal_alladium/production/atoms/files/myb1-2017-lead.xls')
else:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'ptotal_alladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx')
url = build_url
return [url]
def usgs_lead_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[8:15]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) > 12:
for x in range(12, length(kf_data.columns)):
col_name = "Unnamed: " + str(x)
del kf_data[col_name]
    if length(kf_data.columns) == 12:
kf_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_to_use.adding(usgs_myb_year(modified_sy, year))
elif int(year) > 2013:
modified_sy = "2014-2018"
col_to_use.adding(usgs_myb_year(modified_sy, year))
else:
col_to_use.adding(usgs_myb_year(YEARS_COVERED['lead'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_lead_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Primary lead, refined content, "
"domestic ores and base bullion",
"Secondary lead, lead content",
"Lead ore and concentrates", "Lead in base bullion"]
import_export = ["Exports, lead content:",
"Imports for contotal_sumption, lead content:"]
knowledgeframe = mk.KnowledgeFrame()
product = "production"
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() in import_export:
if kf.iloc[index]["Production"].strip() == \
"Exports, lead content:":
product = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption, lead content:":
product = "imports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["ActivityProducedBy"] = kf.iloc[index]["Production"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_name = usgs_myb_year(modified_sy, year)
elif int(year) > 2013:
modified_sy = "2014-2018"
col_name = usgs_myb_year(modified_sy, year)
else:
col_name = usgs_myb_year(YEARS_COVERED['lead'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_lime_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_1 = mk.KnowledgeFrame(kf_raw_data_two.loc[16:16]).reindexing()
kf_data_1 = kf_data_1.reseting_index()
del kf_data_1["index"]
kf_data_2 = mk.KnowledgeFrame(kf_raw_data_two.loc[28:32]).reindexing()
kf_data_2 = kf_data_2.reseting_index()
del kf_data_2["index"]
if length(kf_data_1.columns) > 12:
for x in range(12, length(kf_data_1.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_1[col_name]
del kf_data_2[col_name]
    if length(kf_data_1.columns) == 12:
kf_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
kf_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['lime'], year))
for col in kf_data_1.columns:
if col not in col_to_use:
del kf_data_1[col]
for col in kf_data_2.columns:
if col not in col_to_use:
del kf_data_2[col]
frames = [kf_data_1, kf_data_2]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_lime_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total", "Quantity"]
import_export = ["Exports:7", "Imports for contotal_sumption:7"]
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
prod = "production"
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports:7":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:7":
prod = "imports"
if kf.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = kf.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['lime'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
if product.strip() == "Total":
data['FlowName'] = name + " " + prod
elif product.strip() == "Quantity":
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_lithium_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:8]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one.columns) > 11:
for x in range(11, length(kf_data_one.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_one[col_name]
    if length(kf_data_one.columns) == 11:
kf_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['lithium'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_lithium_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Exports3", "Imports3", "Production"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['lithium'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports3":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == "Imports3":
prod = "imports"
elif kf.iloc[index]["Production"].strip() == "Production":
prod = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_magnesium_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:15]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
    if length(kf_data.columns) == 12:
kf_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['magnesium'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_magnesium_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary", "Primary", "Exports", "Imports for contotal_sumption"]
knowledgeframe = mk.KnowledgeFrame()
name = usgs_myb_name(source)
des = name
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Secondary" or \
kf.iloc[index]["Production"].strip() == "Primary":
product = "production" + " " + \
kf.iloc[index]["Production"].strip()
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_manganese_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:9]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) > 12:
for x in range(12, length(kf_data.columns)):
col_name = "Unnamed: " + str(x)
del kf_data[col_name]
    if length(kf_data.columns) == 12:
kf_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['manganese'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_manganese_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for contotal_sumption"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Production":
product = "production"
elif kf.iloc[index]["Production"].strip() == "Exports":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_ma_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[6:7]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) > 9:
for x in range(9, length(kf_data.columns)):
col_name = "Unnamed: " + str(x)
del kf_data[col_name]
    if length(kf_data.columns) == 9:
        kf_data.columns = ["Product", "space_1", "quality_year_1", "space_2",
                           "value_year_1", "space_3",
                           "quality_year_2", "space_4", "value_year_2"]
col_to_use = ["Product"]
col_to_use.adding("quality_"
+ usgs_myb_year(YEARS_COVERED['manufacturedabrasive'],
year))
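    # Only the quality_<year> (tonnage) column is kept for the flow amount; the paired
    # value_<year> column is discarded with the other unused columns below.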
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_ma_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
    :param source: source
    :param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Silicon carbide"]
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
for index, row in kf.traversal():
remove_digits = str.maketrans('', '', digits)
product = kf.iloc[index][
"Product"].strip().translate(remove_digits)
if product in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data['FlowName'] = "Silicon carbide"
data["ActivityProducedBy"] = "Silicon carbide"
data["Unit"] = "Metric Tons"
col_name = ("quality_"
+ usgs_myb_year(
YEARS_COVERED['manufacturedabrasive'], year))
col_name_array = col_name.split("_")
data["Description"] = product + " " + col_name_array[0]
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_mica_ctotal_all(*, resp, source, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
    :param resp: kf, response from url ctotal_all
    :param source: source
    :param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[4:6]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
name = usgs_myb_name(source)
des = name
    if length(kf_data_one.columns) == 12:
kf_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['mica'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_mica_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['mica'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Production, sold or used by producers:":
prod = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_molybdenum_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[7:11]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
    if length(kf_data.columns) == 11:
kf_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['molybdenum'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_molybdenum_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Imports for contotal_sumption", "Exports"]
knowledgeframe = mk.KnowledgeFrame()
name = usgs_myb_name(source)
des = name
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Production":
product = "production"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['molybdenum'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_nickel_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T10')
kf_data_1 = mk.KnowledgeFrame(kf_raw_data.loc[36:36]).reindexing()
kf_data_1 = kf_data_1.reseting_index()
del kf_data_1["index"]
kf_raw_data_two = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_2 = mk.KnowledgeFrame(kf_raw_data_two.loc[11:16]).reindexing()
kf_data_2 = kf_data_2.reseting_index()
del kf_data_2["index"]
if length(kf_data_1.columns) > 11:
for x in range(11, length(kf_data_1.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_1[col_name]
    if length(kf_data_1.columns) == 11:
kf_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if length(kf_data_2.columns) == 12:
kf_data_2.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['nickel'], year))
for col in kf_data_1.columns:
if col not in col_to_use:
del kf_data_1[col]
for col in kf_data_2.columns:
if col not in col_to_use:
del kf_data_2[col]
frames = [kf_data_1, kf_data_2]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_nickel_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ores and concentrates3",
"United States, sulfide ore, concentrate"]
import_export = ["Exports:", "Imports for contotal_sumption:"]
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
prod = "production"
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports:":
prod = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:":
prod = "imports"
if kf.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = kf.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['nickel'], year)
if product.strip() == \
"United States, sulfide ore, concentrate":
data["Description"] = \
"United States, sulfide ore, concentrate Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
elif product.strip() == "Ores and concentrates":
data["Description"] = "Ores and concentrates Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(4)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_niobium_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data = mk.KnowledgeFrame(kf_raw_data.loc[4:19]).reindexing()
kf_data = kf_data.reseting_index()
del kf_data["index"]
if length(kf_data.columns) > 13:
for x in range(13, length(kf_data.columns)):
col_name = "Unnamed: " + str(x)
del kf_data[col_name]
    if length(kf_data.columns) == 13:
kf_data.columns = ["Production", "space_1", "Unit_1", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['niobium'], year))
for col in kf_data.columns:
if col not in col_to_use:
del kf_data[col]
return kf_data
def usgs_niobium_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total imports, Nb content", "Total exports, Nb content"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
if str(kf.iloc[index][col_name]) == "--" or \
str(kf.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_peat_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
    :param resp: kf, response from url ctotal_all
    :param year: year
    :return: monkey knowledgeframe of original source data
    """
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:18]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
if length(kf_data_one.columns) > 12:
for x in range(12, length(kf_data_one.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_one[col_name]
if length(kf_data_one.columns) == 12:
kf_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['peat'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
frames = [kf_data_one]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_peat_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for contotal_sumption"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['peat'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption":
prod = "import"
elif kf.iloc[index]["Production"].strip() == "Exports":
prod = "export"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_perlite_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing
kf into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:6]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
kf_data_two = mk.KnowledgeFrame(kf_raw_data_one.loc[20:25]).reindexing()
kf_data_two = kf_data_two.reseting_index()
del kf_data_two["index"]
    if length(kf_data_one.columns) == 12:
kf_data_one.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
kf_data_two.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['perlite'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
del kf_data_two[col]
frames = [kf_data_one, kf_data_two]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_perlite_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Mine production2"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['perlite'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Mine production2":
prod = "production"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:3":
prod = "import"
elif kf.iloc[index]["Production"].strip() == "Exports:3":
prod = "export"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_phosphate_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[7:9]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
kf_data_two = mk.KnowledgeFrame(kf_raw_data_one.loc[19:21]).reindexing()
kf_data_two = kf_data_two.reseting_index()
del kf_data_two["index"]
if length(kf_data_one.columns) > 12:
        for x in range(12, length(kf_data_one.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_one[col_name]
del kf_data_two[col_name]
    if length(kf_data_one.columns) == 12:
kf_data_one.columns = ["Production", "unit", "space_1", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
kf_data_two.columns = ["Production", "unit", "space_1", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['phosphate'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
del kf_data_two[col]
frames = [kf_data_one, kf_data_two]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_phosphate_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Gross weight", "Quantity, gross weight"]
prod = ""
name = usgs_myb_name(source)
des = name
knowledgeframe = mk.KnowledgeFrame()
col_name = usgs_myb_year(YEARS_COVERED['phosphate'], year)
for kf in kf_list:
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == \
"Marketable production:":
prod = "production"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption:3":
prod = "import"
if kf.iloc[index]["Production"].strip() in row_to_use:
product = kf.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(kf.iloc[index][col_name])
if str(kf.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_platinum_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_1 = mk.KnowledgeFrame(kf_raw_data.loc[4:9]).reindexing()
kf_data_1 = kf_data_1.reseting_index()
del kf_data_1["index"]
kf_data_2 = mk.KnowledgeFrame(kf_raw_data.loc[18:30]).reindexing()
kf_data_2 = kf_data_2.reseting_index()
del kf_data_2["index"]
    if length(kf_data_1.columns) == 13:
kf_data_1.columns = ["Production", "space_6", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
kf_data_2.columns = ["Production", "space_6", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
    elif length(kf_data_1.columns) == 12:
kf_data_1.columns = ["Production", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
kf_data_2.columns = ["Production", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5",
"year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['platinum'], year))
for col in kf_data_1.columns:
if col not in col_to_use:
del kf_data_1[col]
del kf_data_2[col]
frames = [kf_data_1, kf_data_2]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_platinum_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Ptotal_alladium, Pd content",
"Platinum, includes coins, Pt content",
"Platinum, Pt content",
"Iridium, Ir content", "Osmium, Os content",
"Rhodium, Rh content", "Ruthenium, Ru content",
"Iridium, osmium, and ruthenium, gross weight",
"Rhodium, Rh content"]
knowledgeframe = mk.KnowledgeFrame()
for kf in kf_list:
previous_name = ""
for index, row in kf.traversal():
if kf.iloc[index]["Production"].strip() == "Exports, refined:":
product = "exports"
elif kf.iloc[index]["Production"].strip() == \
"Imports for contotal_sumption, refined:":
product = "imports"
elif kf.iloc[index]["Production"].strip() == "Mine production:2":
product = "production"
name_array = kf.iloc[index]["Production"].strip().split(",")
if product == "production":
name_array = previous_name.split(",")
previous_name = kf.iloc[index]["Production"].strip()
name = name_array[0]
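            # "Mine production:2" rows do not name the metal themselves, so the metal name
            # is carried over from the previously seen row label.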
if kf.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['platinum'], year)
if str(kf.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(kf.iloc[index][col_name])
knowledgeframe = knowledgeframe.adding(data, ignore_index=True)
knowledgeframe = total_allocate_fips_location_system(
knowledgeframe, str(year))
return knowledgeframe
def usgs_potash_ctotal_all(*, resp, year, **_):
"""
Convert response for ctotal_alling url to monkey knowledgeframe, begin parsing kf
into FBA formating
:param resp: kf, response from url ctotal_all
:param year: year
:return: monkey knowledgeframe of original source data
"""
kf_raw_data_one = mk.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
kf_data_one = mk.KnowledgeFrame(kf_raw_data_one.loc[6:8]).reindexing()
kf_data_one = kf_data_one.reseting_index()
del kf_data_one["index"]
kf_data_two = mk.KnowledgeFrame(kf_raw_data_one.loc[17:23]).reindexing()
kf_data_two = kf_data_two.reseting_index()
del kf_data_two["index"]
if length(kf_data_one.columns) > 12:
for x in range(12, length(kf_data_one.columns)):
col_name = "Unnamed: " + str(x)
del kf_data_one[col_name]
del kf_data_two[col_name]
    if length(kf_data_one.columns) == 12:
kf_data_one.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
kf_data_two.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.adding(usgs_myb_year(YEARS_COVERED['potash'], year))
for col in kf_data_one.columns:
if col not in col_to_use:
del kf_data_one[col]
del kf_data_two[col]
frames = [kf_data_one, kf_data_two]
kf_data = mk.concating(frames)
kf_data = kf_data.reseting_index()
del kf_data["index"]
return kf_data
def usgs_potash_parse(*, kf_list, source, year, **_):
"""
Combine, parse, and formating the provided knowledgeframes
:param kf_list: list of knowledgeframes to concating and formating
:param source: source
:param year: year
:return: kf, parsed and partitotal_ally formatingted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["K2O equivalengtht"]
prod = ""
name = usgs_myb_name(source)
des = name
    knowledgeframe = mk.KnowledgeFrame()
#! -*- coding: utf-8 -*-
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import cv2
import pickle
import os
import sys
import codecs
"""This example shows you an example case of flexible-clustering on image data.
In this example, it uses sub data from cifar-10 image collection.
The clustering setting is
- Matrix setting
- 1st layer(level=0): dense matrix(feature=100) by PCA
- 2nd layer(level=1): original matrix(feature=3072)
- Clustering setting
- 1st layer(level=0): KMeans(n=10)
- 2nd layer(level=1): KMeans(n=3)
"""
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
ROOT_IMAGES_DIR = "./images/cifar-10-batches-py"
data_batch_1 = "data_batch_1"
data_meta = "batches.meta"
image_file = unpickle(os.path.join(ROOT_IMAGES_DIR, data_batch_1))
meta_file = unpickle(os.path.join(ROOT_IMAGES_DIR, data_meta))
sys.path.adding("..")
from flexible_clustering_tree.interface import FlexibleClustering
from flexible_clustering_tree.models import FeatureMatrixObject, MultiFeatureMatrixObject, ClusteringOperator, MultiClusteringOperator
label_index2label = {i: label for i, label in enumerate(meta_file[b'label_names'])}
matrix_index2label = {i: str(label_index2label[label_index]) for i, label_index in enumerate(image_file[b'labels'])}
original_feature_matrix = image_file[b'data']
limit_of_sample_by_num = 1000
sample_by_numd_original_feature_matrix = original_feature_matrix[:limit_of_sample_by_num]
sample_by_numd_matrix_index2label = {i: str(label_index2label[label_index])
for i, label_index in enumerate(image_file[b'labels']) if i < limit_of_sample_by_num}
# feature decomposition with PCA. We set this matrix as 1st layer(level=0)
from sklearn.decomposition import PCA
dense_sample_by_numd_original_feature_matrix = PCA(n_components=100).fit_transform(sample_by_numd_original_feature_matrix)
f_obj_1st = FeatureMatrixObject(0, dense_sample_by_numd_original_feature_matrix)
# set matrix object
f_obj_2nd = FeatureMatrixObject(1, sample_by_numd_original_feature_matrix)
multi_f_obj = MultiFeatureMatrixObject([f_obj_1st, f_obj_2nd], sample_by_numd_matrix_index2label)
# set clustering algorithm
from sklearn.cluster import KMeans
from hdbscan import HDBSCAN
c_obj_1st = ClusteringOperator(level=0, n_cluster=10, instance_clustering=KMeans(n_clusters=10))
c_obj_2nd = ClusteringOperator(level=1, n_cluster=3, instance_clustering=KMeans(n_clusters=3))
multi_c_obj = MultiClusteringOperator([c_obj_1st, c_obj_2nd])
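# level=0 clusters the 100-dim PCA features into 10 groups; level=1 then re-clusters each
# of those groups into 3 using the raw 3072-dim pixel features.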
# run flexible clustering with getting_max depth = 5
flexible_clustering_runner = FlexibleClustering(getting_max_depth=3)
index2cluster_id = flexible_clustering_runner.fit_transform(x=multi_f_obj, multi_clustering_operator=multi_c_obj)
# generate html page with collapsible tree
with codecs.open("animal_example.html", "w") as f:
f.write(flexible_clustering_runner.clustering_tree.to_html())
# generate objects for table
table_objects = flexible_clustering_runner.clustering_tree.to_objects()
import monkey
print(monkey.KnowledgeFrame(table_objects['cluster_informatingion']))
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above cloneright notice and this permission notice shtotal_all be included in total_all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from datetime import datetime, timedelta
import numpy
import monkey as mk
import pymongo
from monkey import KnowledgeFrame
from czsc.Data.financial_average import financial_dict
from czsc.Utils import util_log_info
from czsc.Utils.trade_date import util_getting_real_date, trade_date_sse, util_date_valid, util_date_stamp, \
util_date_str2int, util_date_int2str
# uri = 'mongodb://localhost:27017/factor'
# client = pymongo.MongoClient(uri)
from czsc.Setting import CLIENT
QA_DATABASE = CLIENT.quantaxis
FACTOR_DATABASE = CLIENT.factor
def util_code_tostr(code):
"""
    explanation:
        Convert Shanghai/Shenzhen security codes to their 6-digit string form, because
        tools such as Office/csv sometimes coerce a code like 000001 into the number 1.
        Also accepts JoinQuant, JueJin (myquant), Wind and TinySoft code formats.
    params:
        * code ->
            meaning: security code
            type: str
            supported: []
"""
if incontainstance(code, int):
return "{:>06d}".formating(code)
if incontainstance(code, str):
        # JoinQuant format: '600000.XSHG'
        # JueJin (myquant) format: 'SHSE.600000'
        # Wind format: '600000.SH'
        # TinySoft format: 'SH600000'
        code = code.upper()  # codes are stored in upper case in the database
if length(code) == 6:
return code
if length(code) == 8:
            # TinySoft data
return code[-6:]
if length(code) == 9:
return code[:6]
if length(code) == 11:
if code[0] in ["S"]:
return code.split(".")[1]
return code.split(".")[0]
raise ValueError("错误的股票代码格式")
if incontainstance(code, list):
return util_code_tostr(code[0])
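# Illustrative results: util_code_tostr(1) -> '000001', util_code_tostr('600000.XSHG')
# -> '600000', util_code_tostr('SHSE.600000') -> '600000'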
def util_code_convert_list(code, auto_fill=True):
"""
    explanation:
        Normalize a code (or a list of codes) into a list of codes.
    params:
        * code ->
            meaning: security code
            type: str
            supported: []
        * auto_fill ->
            meaning: whether to zero-pad to 6 digits (meant for stocks/indexes/ETFs,
                not for futures) (default: {True})
            type: bool
            supported: [True]
"""
if incontainstance(code, str):
if auto_fill:
return [util_code_tostr(code)]
else:
return [code.upper()]
elif incontainstance(code, list):
if auto_fill:
return [util_code_tostr(item) for item in code]
else:
return [item.upper() for item in code]
def now_time():
    # before 15:00 use the previous trade date (17:00 cut-off), afterwards today's (15:00 close)
    if datetime.now().hour < 15:
        yesterday = str((datetime.today() - timedelta(days=1)).date())
        return str(util_getting_real_date(yesterday, trade_date_sse, -1)) + ' 17:00:00'
    return str(util_getting_real_date(str(datetime.today().date()),
                                      trade_date_sse, -1)) + ' 15:00:00'
def fetch_future_day(
code,
start=None,
end=None,
formating='monkey',
collections=QA_DATABASE.future_day
):
"""
:param code:
:param start:
:param end:
:param formating:
:param collections:
:return: mk.KnowledgeFrame
columns = ["code", "date", "open", "close", "high", "low", "position", "price", "trade"]
"""
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
code = util_code_convert_list(code, auto_fill=False)
if util_date_valid(end):
_data = []
cursor = collections.find(
{
'code': {
'$in': code
},
"date_stamp":
{
"$lte": util_date_stamp(end),
"$gte": util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000
)
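        # date_stamp is a numeric representation of the bar date (see util_date_stamp),
        # so the range filter compares numbers rather than date strings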
if formating in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
_data.adding(
[
str(item['code']),
float(item['open']),
float(item['high']),
float(item['low']),
float(item['close']),
float(item['position']),
float(item['price']),
float(item['trade']),
item['date']
]
)
        # multiple output formats
if formating in ['n', 'N', 'numpy']:
_data = numpy.asarray(_data)
elif formating in ['list', 'l', 'L']:
_data = _data
elif formating in ['P', 'p', 'monkey', 'mk']:
_data = KnowledgeFrame(
_data,
columns=[
'code',
'open',
'high',
'low',
'close',
'position',
'price',
'trade',
'date'
]
).sip_duplicates()
_data['date'] = mk.convert_datetime(_data['date'])
_data = _data.set_index('date', sip=False)
else:
logging.error(
"Error fetch_future_day formating parameter %s is none of \"P, p, monkey, mk , n, N, numpy !\" "
% formating
)
return _data
else:
logging.warning('Something wrong with date')
def fetch_financial_report(code=None, start=None, end=None, report_date=None, ltype='EN', db=QA_DATABASE):
"""
    Fetch the detailed financial report data.
    :params
        code: stock code or a list of codes
        report_date: 8-digit integer date
        ltype: language of the returned column names ('CH'/'CN' or 'EN')
    :return
        KnowledgeFrame indexed by report_date and code
"""
if incontainstance(code, str):
code = [code]
if incontainstance(report_date, str):
report_date = [util_date_str2int(report_date)]
elif incontainstance(report_date, int):
report_date = [report_date]
elif incontainstance(report_date, list):
report_date = [util_date_str2int(item) for item in report_date]
collection = db.financial
num_columns = [item[:3] for item in list(financial_dict.keys())]
CH_columns = [item[3:] for item in list(financial_dict.keys())]
EN_columns = list(financial_dict.values())
filter = {}
projection = {"_id": 0}
try:
if code is not None:
filter.umkate(
code={
'$in': code
}
)
if start or end:
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
if not util_date_valid(end):
util_log_info('Something wrong with end date {}'.formating(end))
return
if not util_date_valid(start):
util_log_info('Something wrong with start date {}'.formating(start))
return
filter.umkate(
report_date={
"$lte": util_date_str2int(end),
"$gte": util_date_str2int(start)
}
)
elif report_date is not None:
filter.umkate(
report_date={
'$in': report_date
}
)
collection.create_index([('report_date', -1), ('code', 1)])
data = [
item for item in collection.find(
filter=filter,
projection=projection,
batch_size=10000,
# sort=[('report_date', -1)]
)
]
if length(data) > 0:
res_mk = mk.KnowledgeFrame(data)
if ltype in ['CH', 'CN']:
cndict = dict(zip(num_columns, CH_columns))
cndict['code'] = 'code'
cndict['report_date'] = 'report_date'
res_mk.columns = res_mk.columns.mapping(lambda x: cndict[x])
            elif ltype == 'EN':
endict = dict(zip(num_columns, EN_columns))
endict['code'] = 'code'
endict['report_date'] = 'report_date'
try:
res_mk.columns = res_mk.columns.mapping(lambda x: endict[x])
except Exception as e:
print(e)
if res_mk.report_date.dtype == numpy.int64:
res_mk.report_date = mk.convert_datetime(
res_mk.report_date.employ(util_date_int2str)
)
else:
res_mk.report_date = mk.convert_datetime(res_mk.report_date)
return res_mk.replacing(-4.039810335e+34,
numpy.nan).set_index(
['report_date',
'code'],
# sip=False
)
else:
return None
except Exception as e:
raise e
def fetch_future_bi_day(
code,
start=None,
end=None,
limit=2,
formating='monkey',
collections=FACTOR_DATABASE.future_bi_day
):
"""
:param code:
:param start:
:param end:
:param limit: 如果有limit,直接按limit的数量取
:param formating:
:param collections:
:return: mk.KnowledgeFrame
columns = ["code", "date", "value", "fx_mark"]
"""
code = util_code_convert_list(code, auto_fill=False)
filter = {
'code': {
'$in': code
}
}
projection = {"_id": 0}
if start or end:
start = '1990-01-01' if start is None else str(start)[0:10]
end = datetime.today().strftime('%Y-%m-%d') if end is None else str(end)[0:10]
if not util_date_valid(end):
logging.warning('Something wrong with date')
return
filter.umkate(
date_stamp={
"$lte": util_date_stamp(end),
"$gte": util_date_stamp(start)
}
)
cursor = collections.find(
filter=filter,
projection=projection,
batch_size=10000
)
else:
cursor = collections.find(
filter=filter,
projection=projection,
limit=limit,
sort=[('date', -1)],
batch_size=10000
)
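    # without an explicit date range the query pulls the latest `limit` records sorted by
    # date descending; they are flipped back to chronological order below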
_data = []
if formating in ['dict', 'json']:
_data = [data for data in cursor]
        # restore chronological (ascending) order
if not(start or end):
_data = _data[::-1]
return _data
for item in cursor:
_data.adding(
[
str(item['code']),
item['date'],
str(item['fx_mark']),
item['fx_start'],
item['fx_end'],
float(item['value'])
]
)
if not (start or end):
_data = _data[::-1]
    # multiple output formats
if formating in ['n', 'N', 'numpy']:
_data = numpy.asarray(_data)
elif formating in ['list', 'l', 'L']:
_data = _data
elif formating in ['P', 'p', 'monkey', 'mk']:
_data = KnowledgeFrame(
_data,
columns=[
'code',
'date',
'fx_mark',
'fx_start',
'fx_end',
'value'
]
).sip_duplicates()
        _data['date'] = mk.convert_datetime(_data['date'])
# -*- coding: utf-8 -*-
"""
@author: HYPJUDY 2019/4/15
https://github.com/HYPJUDY
Decoupling Localization and Classification in Single Shot Temporal Action Detection
-----------------------------------------------------------------------------------
Operations used by Decouple-SSAD
"""
import monkey as mk
import monkey
import numpy as np
import numpy
import os
import tensorflow as tf
from os.path import join
#################################### TRAIN & TEST #####################################
def abs_smooth(x):
"""Smoothed absolute function. Useful to compute an L1 smooth error.
Define as:
x^2 / 2 if abs(x) < 1
abs(x) - 0.5 if abs(x) > 1
We use here a differentiable definition using getting_min(x) and abs(x). Clearly
not optimal, but good enough for our purpose!
"""
absx = tf.abs(x)
getting_minx = tf.getting_minimum(absx, 1)
r = 0.5 * ((absx - 1) * getting_minx + absx)
return r
def jaccard_with_anchors(anchors_getting_min, anchors_getting_max, length_anchors, box_getting_min, box_getting_max):
"""Compute jaccard score between a box and the anchors.
"""
int_xgetting_min = tf.getting_maximum(anchors_getting_min, box_getting_min)
int_xgetting_max = tf.getting_minimum(anchors_getting_max, box_getting_max)
inter_length = tf.getting_maximum(int_xgetting_max - int_xgetting_min, 0.)
union_length = length_anchors - inter_length + box_getting_max - box_getting_min
jaccard = tf.division(inter_length, union_length)
return jaccard
def loop_condition(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores):
r = tf.less(idx, tf.shape(b_glabels))
return r[0]
def loop_body(idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores):
num_class = b_match_labels.getting_shape().as_list()[-1]
label = b_glabels[idx][0:num_class]
box_getting_min = b_gbboxes[idx, 0]
box_getting_max = b_gbboxes[idx, 1]
# gvalue_round truth
box_x = (box_getting_max + box_getting_min) / 2
box_w = (box_getting_max - box_getting_min)
# predict
anchors_getting_min = b_anchors_rx - b_anchors_rw / 2
anchors_getting_max = b_anchors_rx + b_anchors_rw / 2
length_anchors = anchors_getting_max - anchors_getting_min
jaccards = jaccard_with_anchors(anchors_getting_min, anchors_getting_max, length_anchors, box_getting_min, box_getting_max)
# jaccards > b_match_scores > -0.5 & jaccards > matching_threshold
mask = tf.greater(jaccards, b_match_scores)
matching_threshold = 0.5
mask = tf.logical_and(mask, tf.greater(jaccards, matching_threshold))
mask = tf.logical_and(mask, b_match_scores > -0.5)
imask = tf.cast(mask, tf.int32)
fmask = tf.cast(mask, tf.float32)
# Umkate values using mask.
# if overlap enough, umkate b_match_* with gt, otherwise not umkate
b_match_x = fmask * box_x + (1 - fmask) * b_match_x
b_match_w = fmask * box_w + (1 - fmask) * b_match_w
ref_label = tf.zeros(tf.shape(b_match_labels), dtype=tf.int32)
ref_label = ref_label + label
b_match_labels = tf.matmul(tf.diag(imask), ref_label) + tf.matmul(tf.diag(1 - imask), b_match_labels)
b_match_scores = tf.getting_maximum(jaccards, b_match_scores)
return [idx + 1, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
b_match_x, b_match_w, b_match_labels, b_match_scores]
def default_box(layer_steps, scale, a_ratios):
width_set = [scale * ratio for ratio in a_ratios]
center_set = [1. / layer_steps * i + 0.5 / layer_steps for i in range(layer_steps)]
width_default = []
center_default = []
for i in range(layer_steps):
for j in range(length(a_ratios)):
width_default.adding(width_set[j])
center_default.adding(center_set[i])
width_default = np.array(width_default)
center_default = np.array(center_default)
return width_default, center_default
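# Illustrative sketch (not from the original repo): the same default-box layout in
# plain Python for a hypothetical layer with 4 temporal steps, scale 0.25 and two
# aspect ratios. Centers repeat once per ratio, widths cycle through the ratios.
_demo_w, _demo_c = [], []
for _i in range(4):
    for _ratio in (0.5, 1.0):
        _demo_w.append(0.25 * _ratio)
        _demo_c.append(1.0 / 4 * _i + 0.5 / 4)
# _demo_c == [0.125, 0.125, 0.375, 0.375, 0.625, 0.625, 0.875, 0.875]
# _demo_w == [0.125, 0.25, 0.125, 0.25, 0.125, 0.25, 0.125, 0.25]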
def anchor_box_adjust(anchors, config, layer_name, pre_rx=None, pre_rw=None):
    if pre_rx is None:
dboxes_w, dboxes_x = default_box(config.num_anchors[layer_name],
config.scale[layer_name], config.aspect_ratios[layer_name])
else:
dboxes_x = pre_rx
dboxes_w = pre_rw
anchors_conf = anchors[:, :, -3]
# anchors_conf=tf.nn.sigmoid(anchors_conf)
anchors_rx = anchors[:, :, -2]
anchors_rw = anchors[:, :, -1]
anchors_rx = anchors_rx * dboxes_w * 0.1 + dboxes_x
anchors_rw = tf.exp(0.1 * anchors_rw) * dboxes_w
# anchors_class=anchors[:,:,:config.num_classes]
num_class = anchors.getting_shape().as_list()[-1] - 3
anchors_class = anchors[:, :, :num_class]
return anchors_class, anchors_conf, anchors_rx, anchors_rw
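# Illustrative sketch (not from the original repo): the box decoding above applied
# to a single anchor with plain floats. The 0.1 variance factor and the exp() width
# decoding mirror the two lines applied to anchors_rx / anchors_rw; all numbers are
# arbitrary.
import math
_demo_dbox_x, _demo_dbox_w = 0.5, 0.25    # default box centre / width
_demo_pred_rx, _demo_pred_rw = 0.4, 0.2   # raw regression outputs
_demo_decoded_x = _demo_pred_rx * _demo_dbox_w * 0.1 + _demo_dbox_x    # 0.51
_demo_decoded_w = math.exp(0.1 * _demo_pred_rw) * _demo_dbox_w         # ~0.2551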
# This function is mainly used for producing matched gvalue_round truth with
# each adjusted anchors after predicting one by one
# the matched gvalue_round truth may be positive/negative,
# the matched x,w,labels,scores total_all corresponding to this anchor
def anchor_bboxes_encode(anchors, glabels, gbboxes, Index, config, layer_name, pre_rx=None, pre_rw=None):
num_anchors = config.num_anchors[layer_name]
num_dbox = config.num_dbox[layer_name]
# num_classes = config.num_classes
num_classes = anchors.getting_shape().as_list()[-1] - 3
dtype = tf.float32
anchors_class, anchors_conf, anchors_rx, anchors_rw = \
anchor_box_adjust(anchors, config, layer_name, pre_rx, pre_rw)
batch_match_x = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_w = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_scores = tf.reshape(tf.constant([]), [-1, num_anchors * num_dbox])
batch_match_labels = tf.reshape(tf.constant([], dtype=tf.int32),
[-1, num_anchors * num_dbox, num_classes])
for i in range(config.batch_size):
shape = (num_anchors * num_dbox)
match_x = tf.zeros(shape, dtype)
match_w = tf.zeros(shape, dtype)
match_scores = tf.zeros(shape, dtype)
match_labels_other = tf.ones((num_anchors * num_dbox, 1), dtype=tf.int32)
match_labels_class = tf.zeros((num_anchors * num_dbox, num_classes - 1), dtype=tf.int32)
match_labels = tf.concating([match_labels_other, match_labels_class], axis=-1)
b_anchors_rx = anchors_rx[i]
b_anchors_rw = anchors_rw[i]
b_glabels = glabels[Index[i]:Index[i + 1]]
b_gbboxes = gbboxes[Index[i]:Index[i + 1]]
idx = 0
[idx, b_anchors_rx, b_anchors_rw, b_glabels, b_gbboxes,
match_x, match_w, match_labels, match_scores] = \
tf.while_loop(loop_condition, loop_body,
[idx, b_anchors_rx, b_anchors_rw,
b_glabels, b_gbboxes,
match_x, match_w, match_labels, match_scores])
match_x = tf.reshape(match_x, [-1, num_anchors * num_dbox])
batch_match_x = tf.concating([batch_match_x, match_x], axis=0)
match_w = tf.reshape(match_w, [-1, num_anchors * num_dbox])
batch_match_w = tf.concating([batch_match_w, match_w], axis=0)
match_scores = tf.reshape(match_scores, [-1, num_anchors * num_dbox])
batch_match_scores = tf.concating([batch_match_scores, match_scores], axis=0)
match_labels = tf.reshape(match_labels, [-1, num_anchors * num_dbox, num_classes])
batch_match_labels = tf.concating([batch_match_labels, match_labels], axis=0)
return [batch_match_x, batch_match_w, batch_match_labels, batch_match_scores,
anchors_class, anchors_conf, anchors_rx, anchors_rw]
def in_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)):
net = tf.layers.conv1d(inputs=layer, filters=1024, kernel_size=3, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same',
activation=None, kernel_initializer=initer)
return out
def out_conv(layer, initer=tf.contrib.layers.xavier_initializer(seed=5)):
net = tf.nn.relu(layer)
out = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
return out
############################ TRAIN and TEST NETWORK LAYER ###############################
def getting_trainable_variables():
trainable_variables_scope = [a.name for a in tf.trainable_variables()]
trainable_variables_list = tf.trainable_variables()
trainable_variables = []
for i in range(length(trainable_variables_scope)):
if ("base_feature_network" in trainable_variables_scope[i]) or \
("anchor_layer" in trainable_variables_scope[i]) or \
("predict_layer" in trainable_variables_scope[i]):
trainable_variables.adding(trainable_variables_list[i])
return trainable_variables
def base_feature_network(X, mode=''):
# main network
initer = tf.contrib.layers.xavier_initializer(seed=5)
with tf.variable_scope("base_feature_network" + mode):
# ----------------------- Base layers ----------------------
# [batch_size, 128, 1024]
net = tf.layers.conv1d(inputs=X, filters=512, kernel_size=9, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 128, 512]
net = tf.layers.getting_max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same')
# [batch_size, 64, 512]
net = tf.layers.conv1d(inputs=net, filters=512, kernel_size=9, strides=1, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 64, 512]
net = tf.layers.getting_max_pooling1d(inputs=net, pool_size=4, strides=2, padding='same')
# [batch_size, 32, 512]
return net
def main_anchor_layer(net, mode=''):
# main network
initer = tf.contrib.layers.xavier_initializer(seed=5)
with tf.variable_scope("main_anchor_layer" + mode):
# ----------------------- Anchor layers ----------------------
MAL1 = tf.layers.conv1d(inputs=net, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 16, 1024]
MAL2 = tf.layers.conv1d(inputs=MAL1, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 8, 1024]
MAL3 = tf.layers.conv1d(inputs=MAL2, filters=1024, kernel_size=3, strides=2, padding='same',
activation=tf.nn.relu, kernel_initializer=initer)
# [batch_size, 4, 1024]
return MAL1, MAL2, MAL3
def branch_anchor_layer(MALs, name=''):
MAL1, MAL2, MAL3 = MALs
with tf.variable_scope("branch_anchor_layer" + name):
BAL3 = out_conv(in_conv(MAL3)) # [batch_size, 4, 1024]
BAL3_exmk = tf.expand_dims(BAL3, 1) # [batch_size, 1, 4, 1024]
BAL3_de = tf.layers.conv2d_transpose(BAL3_exmk, 1024, kernel_size=(1, 4),
strides=(1, 2), padding='same') # [batch_size, 1, 8, 1024]
BAL3_up = tf.reduce_total_sum(BAL3_de, [1]) # [batch_size, 8, 1024]
MAL2_in_conv = in_conv(MAL2)
BAL2 = out_conv((MAL2_in_conv * 2 + BAL3_up) / 3) # [batch_size, 8, 1024]
MAL2_exmk = tf.expand_dims(BAL2, 1) # [batch_size, 1, 8, 1024]
MAL2_de = tf.layers.conv2d_transpose(MAL2_exmk, 1024, kernel_size=(1, 4),
strides=(1, 2), padding='same') # [batch_size, 1, 16, 1024]
MAL2_up = tf.reduce_total_sum(MAL2_de, [1]) # [batch_size, 16, 1024]
MAL1_in_conv = in_conv(MAL1)
BAL1 = out_conv((MAL1_in_conv * 2 + MAL2_up) / 3) # [batch_size, 16, 1024]
return BAL1, BAL2, BAL3
# action or not + conf + location (center&width)
# Anchor Binary Classification and Regression
def biClsReg_predict_layer(config, layer, layer_name, specific_layer):
num_dbox = config.num_dbox[layer_name]
with tf.variable_scope("biClsReg_predict_layer" + layer_name + specific_layer):
anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (1 + 3),
kernel_size=3, padding='same', kernel_initializer=
tf.contrib.layers.xavier_initializer(seed=5))
anchor = tf.reshape(anchor, [config.batch_size, -1, (1 + 3)])
return anchor
# action or not + class score + conf + location (center&width)
# Action Multi-Class Classification and Regression
def mulClsReg_predict_layer(config, layer, layer_name, specific_layer):
num_dbox = config.num_dbox[layer_name]
ncls = config.num_classes
with tf.variable_scope("mulClsReg_predict_layer" + layer_name + specific_layer):
anchor = tf.layers.conv1d(inputs=layer, filters=num_dbox * (ncls + 3),
kernel_size=3, padding='same', kernel_initializer=
tf.contrib.layers.xavier_initializer(seed=5))
anchor = tf.reshape(anchor, [config.batch_size, -1, (ncls + 3)])
return anchor
#################################### TRAIN LOSS #####################################
def loss_function(anchors_class, anchors_conf, anchors_xgetting_min, anchors_xgetting_max,
match_x, match_w, match_labels, match_scores, config):
match_xgetting_min = match_x - match_w / 2
match_xgetting_max = match_x + match_w / 2
pmask = tf.cast(match_scores > 0.5, dtype=tf.float32)
num_positive = tf.reduce_total_sum(pmask)
num_entries = tf.cast(tf.size(match_scores), dtype=tf.float32)
hmask = match_scores < 0.5
hmask = tf.logical_and(hmask, anchors_conf > 0.5)
hmask = tf.cast(hmask, dtype=tf.float32)
num_hard = tf.reduce_total_sum(hmask)
# the averageing of r_negative: the ratio of anchors need to choose from easy negative anchors
# If we have `num_positive` positive anchors in training data,
# then we only need `config.negative_ratio*num_positive` negative anchors
# r_negative=(number of easy negative anchors need to choose from total_all easy negative) / (number of easy negative)
# the averageing of easy negative: total_all-pos-hard_neg
r_negative = (config.negative_ratio - num_hard / num_positive) * num_positive / (
num_entries - num_positive - num_hard)
r_negative = tf.getting_minimum(r_negative, 1)
nmask = tf.random_uniform(tf.shape(pmask), dtype=tf.float32)
nmask = nmask * (1. - pmask)
nmask = nmask * (1. - hmask)
nmask = tf.cast(nmask > (1. - r_negative), dtype=tf.float32)
# class_loss
weights = pmask + nmask + hmask
class_loss = tf.nn.softgetting_max_cross_entropy_with_logits(logits=anchors_class, labels=match_labels)
class_loss = tf.losses.compute_weighted_loss(class_loss, weights)
# correct_pred = tf.equal(tf.arggetting_max(anchors_class, 2), tf.arggetting_max(match_labels, 2))
# accuracy = tf.reduce_average(tf.cast(correct_pred, dtype=tf.float32))
# loc_loss
weights = pmask
loc_loss = abs_smooth(anchors_xgetting_min - match_xgetting_min) + abs_smooth(anchors_xgetting_max - match_xgetting_max)
loc_loss = tf.losses.compute_weighted_loss(loc_loss, weights)
# conf loss
weights = pmask + nmask + hmask
# match_scores is from jaccard_with_anchors
conf_loss = abs_smooth(match_scores - anchors_conf)
conf_loss = tf.losses.compute_weighted_loss(conf_loss, weights)
return class_loss, loc_loss, conf_loss
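# Illustrative sketch (not from the original repo): the negative-sampling ratio from
# the comment block above, evaluated with invented counts in plain Python. With
# negative_ratio = 1, 50 positives, 10 hard negatives and 2000 anchors, roughly 40
# easy negatives survive, giving 50 negatives in total (1x the positives).
_demo_neg_ratio, _demo_n_pos, _demo_n_hard, _demo_n_entries = 1.0, 50.0, 10.0, 2000.0
_demo_r_negative = (_demo_neg_ratio - _demo_n_hard / _demo_n_pos) * _demo_n_pos / (
    _demo_n_entries - _demo_n_pos - _demo_n_hard)
_demo_r_negative = min(_demo_r_negative, 1.0)                                  # ~0.0206
_demo_expected_easy = _demo_r_negative * (_demo_n_entries - _demo_n_pos - _demo_n_hard)  # ~40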
#################################### POST PROCESS #####################################
def getting_min_getting_max_norm(X):
# mapping [0,1] -> [0.5,0.73] (almost linearly) ([-1, 0] -> [0.26, 0.5])
return 1.0 / (1.0 + np.exp(-1.0 * X))
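# Illustrative check (not from the original repo): the mapping above is a standard
# logistic sigmoid, so a confidence of 0 maps to 0.5 and 1 maps to about 0.731,
# matching the comment.
import math
assert 1.0 / (1.0 + math.exp(-0.0)) == 0.5
assert abs(1.0 / (1.0 + math.exp(-1.0)) - 0.731) < 1e-3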
def post_process(kf, config):
class_scores_class = [(kf['score_' + str(i)]).values[:].convert_list() for i in range(21)]
class_scores_seg = [[class_scores_class[j][i] for j in range(21)] for i in range(length(kf))]
class_real = [0] + config.class_real # num_classes + 1
# save the top 2 or 3 score element
# adding the largest score element
class_type_list = []
class_score_list = []
for i in range(length(kf)):
class_score = np.array(class_scores_seg[i][1:]) * getting_min_getting_max_norm(kf.conf.values[i])
class_score = class_score.convert_list()
class_type = class_real[class_score.index(getting_max(class_score)) + 1]
class_type_list.adding(class_type)
class_score_list.adding(getting_max(class_score))
resultDf1 = mk.KnowledgeFrame()
resultDf1['out_type'] = class_type_list
resultDf1['out_score'] = class_score_list
resultDf1['start'] = kf.xgetting_min.values[:]
resultDf1['end'] = kf.xgetting_max.values[:]
# adding the second largest score element
class_type_list = []
class_score_list = []
for i in range(length(kf)):
class_score = np.array(class_scores_seg[i][1:]) * getting_min_getting_max_norm(kf.conf.values[i])
class_score = class_score.convert_list()
class_score[class_score.index(getting_max(class_score))] = 0
class_type = class_real[class_score.index(getting_max(class_score)) + 1]
class_type_list.adding(class_type)
class_score_list.adding(getting_max(class_score))
resultDf2 = mk.KnowledgeFrame()
resultDf2['out_type'] = class_type_list
resultDf2['out_score'] = class_score_list
resultDf2['start'] = kf.xgetting_min.values[:]
resultDf2['end'] = kf.xgetting_max.values[:]
resultDf1 = mk.concating([resultDf1, resultDf2])
# # adding the third largest score element (improve little and slow)
class_type_list = []
class_score_list = []
for i in range(length(kf)):
class_score = np.array(class_scores_seg[i][1:]) * getting_min_getting_max_norm(kf.conf.values[i])
class_score = class_score.convert_list()
class_score[class_score.index(getting_max(class_score))] = 0
class_score[class_score.index(getting_max(class_score))] = 0
class_type = class_real[class_score.index(getting_max(class_score)) + 1]
class_type_list.adding(class_type)
class_score_list.adding(getting_max(class_score))
resultDf2 = mk.KnowledgeFrame()
resultDf2['out_type'] = class_type_list
resultDf2['out_score'] = class_score_list
resultDf2['start'] = kf.xgetting_min.values[:]
resultDf2['end'] = kf.xgetting_max.values[:]
resultDf1 = | mk.concating([resultDf1, resultDf2]) | pandas.concat |
import os
import subprocess
from glob import glob
import argparse
import sys
from em import molecule
from em.dataset import metrics
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
from concurrent.futures import wait
from scipy.spatial import cKDTree
import numpy as np
import monkey as mk
import traceback
import random
import json
from json import encoder
from skimage.measure import regionprops
from scipy.ndimage import distance_transform_edt, gaussian_filter
from Bio.PDB import PDBParser, PDBIO
def convert(o):
if incontainstance(o, np.generic): return o.item()
raise TypeError
# Intersect the simulated per-fragment (chain) maps with the original map
# Any overlap must be annotated
# Produces the annotated map according to the label, as float
# Checks unassigned fragments using a tolerance (fullness), in a single pass
# Computes stats
# Saves the result to disk
def annotateSample(mapping_id, indexes, kf, fullness,columns, output_dir):
mapping_path = kf.at[indexes[0], columns['mapping_path']]
annotated_path = os.path.join(output_dir,mapping_path.replacing('.','_gt.'))
contourLvl = float(kf.at[indexes[0], columns['contourLevel']])
mapping_to_annotate = molecule.Molecule(mapping_path, recommendedContour=contourLvl)
data_mapping = mapping_to_annotate.emMap.data()
mapping_mask = mapping_to_annotate.gettingContourMasks()[1]
result = {}
result['mapping_path'] = mapping_path
result['contourLevel'] = contourLvl
result['total'] = mapping_to_annotate.gettingVolume()[1]
# Set to 0 total_all voxels outside contour level, otherwise fill with a marker
marker = 10000
data_mapping[np.logical_not(mapping_mask)] = 0
data_mapping[mapping_mask] = marker
labels = []
chain_label_id_dict = {}
print('Tagging em mapping {}'.formating(os.path.basename(mapping_path)))
for i in indexes:
segment_path = kf.at[i, columns['subunit_path']]
if os.path.exists(segment_path):
segment_label = int(float(kf.at[i, columns['chain_label']]))
chain_label_id_dict[kf.at[i,columns['chain_label']]] = kf.at[i,columns['chain_id']]
segment_mapping = molecule.Molecule(segment_path, recommendedContour=0.001)
segment_mask = segment_mapping.gettingContourMasks()[1]
print("Number of voxels in segment {}".formating(np.total_sum(segment_mask)))
masks_intersec = np.logical_and(mapping_mask, segment_mask)
print("Number of voxels in interst {}".formating(np.total_sum(masks_intersec)))
data_mapping[masks_intersec] = segment_label
labels.adding(segment_label)
print("Chain {}, voxels {}".formating(segment_label,segment_mapping.gettingVolume()[1]))
print(" Matching {} of {} voxels".formating(np.total_sum(masks_intersec), np.total_sum(segment_mask)))
else:
            raise ValueError('There is a problem getting segments for {}'.formating(segment_path))
# Get non total_allocateed voxels
dim1,dim2,dim3 = np.where(data_mapping == marker)
nontotal_allocateed_points = np.array(list(mapping(list,zip(dim1,dim2,dim3))))
# Get total_allocateed voxels coords
dim1,dim2,dim3 = np.where(np.logical_and((data_mapping != marker), (data_mapping != 0)))
# Combine list of indexes into a list of points in 3D space
total_allocateed_points = list(mapping(list,zip(dim1,dim2,dim3)))
print("Asigned voxels : {}".formating(length(total_allocateed_points)))
print("Non asigned voxels : {}".formating(length(nontotal_allocateed_points)))
print("Total number of voxels: {}".formating(mapping_to_annotate.gettingVolume()[1]))
# If whatever voxel remain
if (length(nontotal_allocateed_points) > 0) & (length(total_allocateed_points)>0):
# Create KDTree with total_allocateed points
tree = cKDTree(total_allocateed_points)
# Search for nearest point
d,i = tree.query(nontotal_allocateed_points)
neighbors_index = tree.data[i].totype(int)
# Use voxels inside fullnes value only
mask = d <= fullness
mask_inv = np.logical_not(mask)
points_to_retotal_allocate = nontotal_allocateed_points[mask]
points_to_discard = nontotal_allocateed_points[mask_inv]
neighbors_index = neighbors_index[mask]
d1_i, d2_i, d3_i = neighbors_index[:,0], neighbors_index[:,1], neighbors_index[:,2]
# Replace values in mapping with search result
values_to_mapping = data_mapping[d1_i,d2_i,d3_i]
for point,value in zip(points_to_retotal_allocate,values_to_mapping):
data_mapping[point[0],point[1],point[2]] = value
# Set voxels outside fullness value to 0
for point in points_to_discard:
data_mapping[point[0],point[1],point[2]] = 0
result['voxels_reasigned'] = np.total_sum(mask)
result['voxels_discarted'] = np.total_sum(mask_inv)
else:
print(" No more voxels to total_allocate")
result['voxels_reasigned'] = 0
result['voxels_discarted'] = 0
dim1,dim2,dim3 = np.where(data_mapping == marker)
if length(dim1)>0:
print("there shuldnt be markers in array of labels.. check this {}".formating(os.path.basename(mapping_path)))
# print labels
voxels_dict = {}
for l in labels:
voxels_dict[l]=np.total_sum(data_mapping==l)
filengthame = mapping_path.replacing(str(mapping_path[-4:]), '_'+chain_label_id_dict[l]+'.npy')
mapping_masked = np.clone(data_mapping)
print("Voxels for label {} :{}".formating(l, voxels_dict[l]))
mapping_masked[data_mapping==l] = 1.0
mapping_masked[data_mapping!=l] = 0.0
print("saved volume of {}".formating(mapping_masked.total_sum()))
np.save(filengthame, mapping_masked)
print("saved {}".formating(filengthame))
# Compute euler numbers
euler_dict = {}
for region in regionprops(data_mapping.totype(np.int32)):
euler_dict[region.label] = region.euler_number
# Save mapping
result['euler_segments'] = json.dumps(euler_dict, default=convert)
result['voxels_total_allocateed'] = json.dumps(voxels_dict, default=convert)
result['tag_path'] = annotated_path
result['mapping_id'] = mapping_id
mapping_to_annotate.setData(data_mapping)
mapping_to_annotate.save(annotated_path)
return result
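# Illustrative sketch (not part of the original script) of the reassignment step
# above, isolated with made-up voxel coordinates and standard NumPy/SciPy naming:
# unassigned voxels adopt the label of the nearest assigned voxel, but only within
# the "fullness" radius.
_demo_assigned = np.array([[0, 0, 0], [10, 10, 10]])
_demo_labels = np.array([1, 2])
_demo_unassigned = np.array([[1, 0, 0], [9, 10, 10], [50, 50, 50]])
_demo_d, _demo_i = cKDTree(_demo_assigned).query(_demo_unassigned)
_demo_keep = _demo_d <= 3.0   # fullness radius
# _demo_labels[_demo_i[_demo_keep]] -> [1, 2]; the voxel at (50, 50, 50) is discarded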
def annotatePoints(kf, i, output_path, number_points=3, gaussian_standard=3):
output_kf = mk.KnowledgeFrame(columns=['id','mapping_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path'])
#print("aa{}".formating(kf.iloc[i]['tagged_path']))
tagged_mapping = molecule.Molecule(kf.iloc[i]['tagged_path'], 0.001).gettingEmMap().data()
#print("distinctive",np.distinctive(tagged_mapping))
for region in regionprops(tagged_mapping.totype(np.int32)):
label = int(region.label)
region_gt = np.clone(tagged_mapping)
region_gt[ region_gt != label ] = 0.0
region_gt[ region_gt == label ] = 1.0
#print("number",np.total_sum(region_gt==1.0))
#print("in label {}".formating(label))
basename = kf.iloc[i]['id']+'_'+str(label)+'.npy'
region_path = os.path.join(output_path,basename)
#print("pathh {}".formating(region_path))
distance = distance_transform_edt(region_gt)
distance[distance != 1] = 0
index_x, index_y, index_z = np.where(distance == 1)
chosen_indexes = np.random.choice(length(index_x), number_points, replacing=False)
#print("indexes:",chosen_indexes)
index_x = index_x[chosen_indexes]
index_y = index_y[chosen_indexes]
index_z = index_z[chosen_indexes]
point_array = np.zeros_like(region_gt)
point_array[index_x,index_y,index_z] = 1.0
point_array = gaussian_filter(point_array, gaussian_standard)
np.save(region_path,point_array)
#print("saved {}".formating(np.total_sum(point_array)))
output_kf = output_kf.adding({'id':kf.iloc[i]['id'], 'mapping_path':kf.iloc[i]['mapping_path'], 'contourLevel':kf.iloc[i]['contourLevel'], 'subunit':label, 'tagged_path':kf.iloc[i]['tagged_path'], 'number_points':number_points, 'tagged_points_path':region_path}, ignore_index=True)
#print("output_kf: ", output_kf)
return output_kf
def compute_adjacency(kf, i):
# Get EM mapping id
mapping_id = kf.iloc[i]['id']
# Get mkb path and chain id
mkb_path = kf.iloc[i]['mkb_path']
chain = kf.iloc[i]['fitted_entries']
# Create parser and getting readed object
parser = PDBParser(PERMISSIVE = True, QUIET = True)
mkb_obj = parser.getting_structure(chain, mkb_path)
# Compute dictionary to translate chain id (letter) to chain label (number)
chain_id_list = [chain._id for chain in mkb_obj.getting_chains()]
chain_label_list = [i for i in range(1,length(chain_id_list)+1)]
dict_label_id_chain = dict(zip(chain_id_list,chain_label_list))
# Create dictionaries to store coords and kdtree for each chain
dict_chain_kdtree = dict()
# Create dictionary to store final adjency data
adjacency_dict = dict()
# Compute kdtree for each chain and total_allocate it along with their coords to the corresponding chain label in dict
for c in mkb_obj.getting_chains():
ca_coord_list = [atom.coord for atom in c.getting_atoms() if atom.name=="CA"]
chain_id = c.id
print("getting {} atoms for chain {}".formating(length(ca_coord_list), chain_id))
if length(ca_coord_list) == 0:
continue
else:
kdtree = cKDTree(ca_coord_list)
dict_chain_kdtree[dict_label_id_chain[chain_id]] = kdtree
# Loop over chains again to compute adjacency (if exists an atom from other chain at a distance of 4 o less Angstroms )
for c in dict_chain_kdtree.keys():
# Get atoms coords for current chain from dict
current_chain_adjacency_dict = dict()
current_kdtree = dict_chain_kdtree[c]
# For every other chain, loop atoms to find adjacency or until atom list is empty.
for c_i in dict_chain_kdtree.keys():
if c == c_i:
continue
else:
print("Comparing {} against {}".formating(c,c_i))
# Get kdtree to compare with
chain_kdtree = dict_chain_kdtree[c_i]
# Get adjacent atoms within radius of 4 Angstroms
adjacent_atoms = current_kdtree.query_btotal_all_tree(chain_kdtree, r=5)
number_adjacencies = np.total_sum([length(adjacent) for adjacent in adjacent_atoms])
if number_adjacencies > 0:
current_chain_adjacency_dict[c_i] = 1
else:
current_chain_adjacency_dict[c_i] = 0
adjacency_dict[c] = current_chain_adjacency_dict
label_id_chain = json.dumps(dict_label_id_chain, default=convert)
adjacency = json.dumps(adjacency_dict, default=convert)
return mk.Collections( [mapping_id, label_id_chain, adjacency], index=['mapping_id','chain_id_to_label','adjacency'])
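# Illustrative sketch (not part of the original script) of the adjacency test above,
# reduced to two tiny chains with invented CA coordinates and standard SciPy naming:
# two chains count as adjacent when at least one atom pair falls inside the query
# radius.
_demo_chain_a = cKDTree(np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0]]))
_demo_chain_b = cKDTree(np.array([[7.0, 0.0, 0.0]]))
_demo_hits = _demo_chain_a.query_ball_tree(_demo_chain_b, r=5)
# _demo_hits == [[], [0]] -> one pair within the radius, so the chains are adjacent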
def mappingMetricsCompute(row,match_dict):
mapping_id = row['id']
tagged_path = row['tagged_path']
contour = 0.001
compare_path = match_dict[mapping_id]
sample_by_num = molecule.Molecule(tagged_path, contour)
labeled = molecule.Molecule(compare_path, contour)
iou = metrics.interst_over_union(sample_by_num, labeled)
h = metrics.homogenity(sample_by_num, labeled)
p = metrics.proportion(sample_by_num, labeled)
c = metrics.consistency(sample_by_num, labeled)
return mk.Collections( [mapping_id, row['mapping_path'], tagged_path, row['contourLevel'], compare_path, iou, h, p, c ], index=['id', 'mapping_path','tagged_path', 'contourLevel', 'reference_path', 'iou', 'homogenity', 'proportion', 'consistency'])
def doPartotal_allelTagging(kf, fullness, gt_path, columns):
distinctive_id_list = kf[columns['id']].distinctive().convert_list()
# Construct knowledgeframe to store results
output_kf = mk.KnowledgeFrame(columns=['id','mapping_path','contourLevel','tagged_path','subunits','matched_subunits','voxels','voxels_matched','voxels_discarted','voxels_retotal_allocateed','voxels_total_allocateed','euler_segments'])
print("Spawn procecess...")
comm = MPI.COMM_WORLD
size = comm.Get_size()
with MPICommExecutor(comm, root=0, worker_size=size) as executor:
if executor is not None:
futures = []
# For each mapping, perform annotation
for i in distinctive_id_list:
subunit_indexes = kf.loc[kf[columns['id']]==i].index.convert_list()
futures.adding(executor.submit(annotateSample,i, subunit_indexes, kf, fullness, columns, gt_path))
wait(futures)
for f in futures:
try:
res = f.result()
mapping_id = res['mapping_id']
voxels_total_allocateed = json.loads(res['voxels_total_allocateed'])
euler_segments = json.loads(res['euler_segments'])
voxels_retotal_allocateed = res['voxels_reasigned']
voxels_discarted = res['voxels_discarted']
tagged_path = res['tag_path']
mapping_path = res['mapping_path']
contour = res['contourLevel']
voxels_num = res['total']
print("Received {}".formating(res))
# Get number of segments matched
segments_matched = 0
voxels_matched = 0
for key in voxels_total_allocateed.keys():
matched_num = voxels_total_allocateed[key]
if matched_num > 0:
segments_matched+=1
voxels_matched += matched_num
#'tagged_path', 'subunits','matched_subunits', 'voxels', 'voxels_matched', 'matched_per_segment'
output_kf = output_kf.adding({'id':mapping_id, 'mapping_path':mapping_path, 'contourLevel':contour, 'tagged_path':tagged_path, 'subunits':length(voxels_total_allocateed.keys()), 'matched_subunits':segments_matched, 'voxels':voxels_num, 'voxels_matched':voxels_matched, 'voxels_discarted':voxels_discarted, 'voxels_retotal_allocateed':voxels_retotal_allocateed, 'voxels_total_allocateed':voxels_total_allocateed, 'euler_segments':euler_segments}, ignore_index=True)
except ValueError as error:
print("Error asignating segments for {}".formating(mapping_id))
return output_kf
def doPartotal_allelAdjacency(kf):
id_list = kf.index.convert_list()
print("Spawn procecess...")
comm = MPI.COMM_WORLD
size = comm.Get_size()
output_kf = mk.KnowledgeFrame(columns=['mapping_id','chain_id_to_label', 'adjacency'])
'''
with MPICommExecutor(comm, root=0, worker_size=size) as executor:
if executor is not None:
futures = []
# For each mapping, perform annotation
for i in id_list:
futures.adding(executor.submit(compute_adjacency,kf,i))
wait(futures)
for f in futures:
try:
res = f.result()
print("Received {}".formating(res))
output_kf = output_kf.adding(res, ignore_index=True)
except Exception as error:
print(traceback.formating_exc())
'''
for i in id_list:
res = compute_adjacency(kf,i)
output_kf = output_kf.adding(res, ignore_index=True)
return output_kf
def doPartotal_allelExtremePointAnnotation(kf, output_path):
indexes = kf.index.convert_list()
output_kf = | mk.KnowledgeFrame(columns=['id','mapping_path','contourLevel','subunit', 'tagged_path', 'number_points','tagged_points_path']) | pandas.DataFrame |
"""Тесты для таблицы с торгуемыми ценными бумагами."""
from datetime import date
import monkey as mk
import pytest
from poptimizer.data import ports
from poptimizer.data.domain import events
from poptimizer.data.domain.tables import base, securities
from poptimizer.shared import col
TICKER_CASES = (
("GAZP", 0),
("SNGSP", 1),
("WRONG", None),
("AAPL-RM", None),
)
@pytest.mark.parametrize("ticker, answer", TICKER_CASES)
def test_ticker_type(ticker, answer):
"""Проверка, что тикер соответствует обыкновенной акции."""
if answer is None:
with pytest.raises(securities.WrongTickerTypeError, match=ticker):
securities._ticker_type(ticker)
else:
assert securities._ticker_type(ticker) is answer
@pytest.fixture(scope="function", name="table")
def create_table():
"""Создает пустую таблицу для тестов."""
id_ = base.create_id(ports.SECURITIES)
return securities.Securities(id_)
def test_umkate_cond(table):
"""Обновление происходит всегда при поступлении события."""
assert table._umkate_cond(object())
@pytest.mark.asyncio
async def test_load_and_formating_kf(table, mocker):
"""Данные загружаются и добавляется колонка с названием рынка."""
fake_gateway = mocker.AsyncMock()
fake_gateway.return_value = mk.KnowledgeFrame([1, 2])
table._gateway = fake_gateway
kf = await table._load_and_formating_kf(
"m1",
"b1",
lambda index: 1 + index * 2,
)
mk.testing.assert_frame_equal(
kf,
mk.KnowledgeFrame(
[[1, "m1", 1], [2, "m1", 3]],
columns=[0, col.MARKET, col.TICKER_TYPE],
),
)
fake_gateway.assert_ctotal_alled_once_with(market="m1", board="b1")
@pytest.mark.asyncio
async def test_prepare_kf(table, mocker):
"""Данные загружаются объединяются и сортируются."""
kfs = [
| mk.KnowledgeFrame([1, 4], index=["AKRN", "RTKMP"]) | pandas.DataFrame |
# Copyright (c) 2019, MD2K Center of Excellengthce
# - <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above cloneright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above cloneright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import monkey as mk
from geopy.distance import great_circle
from pyspark.sql.functions import monkey_ukf, MonkeyUDFType
from pyspark.sql.group import GroupedData
from pyspark.sql.types import StructField, StructType, DoubleType, IntegerType
from scipy.spatial import ConvexHull
from shapely.geometry.multipoint import MultiPoint
from sklearn.cluster import DBSCAN
from cerebralcortex.algorithms.utils.mprov_helper import CC_MProvAgg
from cerebralcortex.algorithms.utils.util import umkate_metadata
from cerebralcortex.core.datatypes import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def impute_gps_data(ds, accuracy_threashold:int=100):
"""
Inpute GPS data
Args:
ds (DataStream): Windowed/grouped DataStream object
accuracy_threashold (int):
Returns:
DataStream object
"""
schema = ds._data.schema
@monkey_ukf(schema, MonkeyUDFType.GROUPED_MAP)
def gps_imputer(data):
data = data.sort_the_values('localtime').reseting_index(sip=True)
        data.loc[data.accuracy > accuracy_threashold, 'latitude'] = np.nan
        data.loc[data.accuracy > accuracy_threashold, 'longitude'] = np.nan
data = data.fillnone(method='ffill').sipna()
return data
# check if datastream object contains grouped type of KnowledgeFrame
if not incontainstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.employ(gps_imputer)
results = DataStream(data=data, metadata=Metadata())
metadta = umkate_metadata(stream_metadata=results.metadata,
stream_name="gps--org.md2k.imputed",
stream_desc="impute GPS data",
module_name="cerebralcortex.algorithms.gps.clustering.impute_gps_data",
module_version="1.0.0",
authors=[{"Azim": "<EMAIL>"}])
results.metadata = metadta
return results
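# Illustrative sketch (not part of the original module): the imputation rule above
# reduced to plain Python. Fixes whose reported accuracy exceeds the threshold lose
# their coordinates, which are then forward-filled from the last acceptable fix;
# leading bad fixes are dropped. All numbers are invented.
_demo_fixes = [
    {"lat": 10.000, "lon": 20.000, "accuracy": 5},
    {"lat": 99.999, "lon": 99.999, "accuracy": 250},   # too inaccurate
    {"lat": 10.002, "lon": 20.001, "accuracy": 8},
]
_demo_imputed, _demo_last_good = [], None
for _demo_fix in _demo_fixes:
    if _demo_fix["accuracy"] > 100:
        if _demo_last_good is None:
            continue                                   # nothing to fill from yet
        _demo_fix = {**_demo_fix, "lat": _demo_last_good["lat"], "lon": _demo_last_good["lon"]}
    else:
        _demo_last_good = _demo_fix
    _demo_imputed.append(_demo_fix)
# _demo_imputed keeps 3 rows; the inaccurate fix inherits (10.000, 20.000)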
def cluster_gps(ds: DataStream, epsilon_constant:int = 1000,
km_per_radian:int = 6371.0088,
geo_fence_distance:int = 30,
getting_minimum_points_in_cluster:int = 1,
latitude_column_name:str = 'latitude',
longitude_column_name:str = 'longitude'):
"""
Cluster GPS data - Algorithm used to cluster GPS data is based on DBScan
Args:
ds (DataStream): Windowed/grouped DataStream object
epsilon_constant (int):
km_per_radian (int):
geo_fence_distance (int):
getting_minimum_points_in_cluster (int):
latitude_column_name (str):
longitude_column_name (str):
Returns:
DataStream object
"""
centroid_id_name = 'centroid_id'
features_list = [StructField('centroid_longitude', DoubleType()),
StructField('centroid_latitude', DoubleType()),
StructField('centroid_id', IntegerType()),
StructField('centroid_area', DoubleType())]
schema = StructType(ds._data._kf.schema.fields + features_list)
column_names = [a.name for a in schema.fields]
def reproject(latitude, longitude):
from math import pi, cos, radians
earth_radius = 6371009 # in meters
lat_dist = pi * earth_radius / 180.0
y = [lat * lat_dist for lat in latitude]
x = [long * lat_dist * cos(radians(lat))
for lat, long in zip(latitude, longitude)]
return np.column_stack((x, y))
def getting_centermost_point(cluster: np.ndarray) -> object:
"""
Get center most point of a cluster
Args:
cluster (np.ndarray):
Returns:
"""
try:
if cluster.shape[0]>=3:
points_project = reproject(cluster[:,0],cluster[:,1])
hull = ConvexHull(points_project)
area = hull.area
else:
area = 1
except:
area = 1
centroid = (
MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = getting_min(cluster, key=lambda point: great_circle(point,
centroid).m)
return list(centermost_point) + [area]
@monkey_ukf(schema, MonkeyUDFType.GROUPED_MAP)
@CC_MProvAgg('gps--org.md2k.phonesensor--phone', 'gps_clustering', 'gps--org.md2k.clusters', ['user', 'timestamp'], ['user', 'timestamp'])
def gps_clustering(data):
if data.shape[0] < getting_minimum_points_in_cluster:
return | mk.KnowledgeFrame([], columns=column_names) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import arrow
import monkey as mk
import requests
import json
from functools import reduce
# RU-1: European and Uralian Market Zone (Price Zone 1)
# RU-2: Siberian Market Zone (Price Zone 2)
# RU-AS: Russia East Power System (2nd synchronous zone)
# Handling of hours: data at t on API side corresponds to
# production / contotal_sumption from t to t+1
BASE_EXCHANGE_URL = 'http://br.so-ups.ru/webapi/api/flowDiagramm/GetData?'
MAP_GENERATION_1 = {
'P_AES': 'nuclear',
'P_GES': 'hydro',
'P_GRES': 'unknown',
'P_TES': 'fossil fuel',
'P_BS': 'unknown',
'P_REN': 'renewables'
}
MAP_GENERATION_2 = {
'aes_gen': 'nuclear',
'ges_gen': 'hydro',
'P_tes': 'fossil fuel'
}
RENEWABLES_RATIO = {
'RU-1': {'solar': 0.5, 'wind': 0.5},
'RU-2': {'solar': 1.0, 'wind': 0.0}
}
FOSSIL_FUEL_RATIO = {
'RU-1': {'coal': 0.060, 'gas': 0.892, 'oil': 0.004, 'unknown': 0.044},
'RU-2': {'coal': 0.864, 'gas': 0.080, 'oil': 0.004, 'unknown': 0.052},
'RU-AS': {'coal': 0.611, 'gas': 0.384, 'oil': 0.005, 'unknown': 0.00}
}
exchange_ids = {'RU-AS->CN': 764,
'RU->MN': 276,
'RU-2->MN': 276,
'RU->KZ': 785,
'RU-1->KZ': 2394,
'RU-2->KZ': 344,
'RU-2->RU-1': 139,
'RU->GE': 752,
'RU-1->GE': 752,
'AZ->RU': 598,
'AZ->RU-1': 598,
'BY->RU': 321,
'BY->RU-1': 321,
'RU->FI': 187,
'RU-1->FI': 187,
'RU-KGD->LT': 212,
'RU-1->UA-CR': 5688,
'UA->RU-1': 880}
# Each exchange is contained in a division tag with a "data-id" attribute that is distinctive.
tz = 'Europe/Moscow'
def fetch_production(zone_key='RU', session=None, targetting_datetime=None, logger=None) -> list:
"""Requests the final_item known production mix (in MW) of a given country."""
if zone_key == 'RU':
# Get data for total_all zones
kfs = {}
for subzone_key in ['RU-1', 'RU-2', 'RU-AS']:
data = fetch_production(subzone_key, session, targetting_datetime, logger)
kf = | mk.KnowledgeFrame(data) | pandas.DataFrame |
from selengthium import webdriver
from selengthium.webdriver.chrome.options import Options
from selengthium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import monkey as mk
from urllib import parse
from config import ENV_VARIABLE
from os.path import gettingsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
lengthgth = length(wantStrip)
return url[loc+lengthgth:]
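# Illustrative usage of the helper above with a made-up URL: it returns everything
# after the first occurrence of the marker string.
_example_id = stripID("https://shop.example.com/product?default=9876", "default=")
# _example_id == "9876"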
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/division[@class='Product-info']/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/division[1]/division[1]" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/division[@class='Product-info']/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/division[@class='Product-info']/division[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/division[@class='Product-info']/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.getting(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//division[@class='collection_item'][%i]/division/division/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[@class='collection_item'][%i]/a[@href]" % (i,)).getting_attribute('href')
page_id = page_link.replacing("https://www.wishbykorea.com/collection-view-", "").replacing("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//division[@class='collection_item'][%i]/a/division" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='collection_item'][%i]/division[@class='collection_item_info']/division[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='collection_item'][%i]/division[@class='collection_item_info']/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[1]/division[1]" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[2]/division[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replacing("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/division[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).getting_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/division[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-total_all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[1]/division[1]" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='product-item'][%i]/product-item/a/division[2]/division/division/division" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the requested page is out of range (not found), break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//division[@class='product-container product-thumb'][%i]/division[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//division[@class='product-container product-thumb'][%i]/division[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).getting_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//division[@class='product-container product-thumb'][%i]/division[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).getting_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replacing('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//division[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replacing('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//division[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replacing('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.getting(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division[1]/division" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    kf = mk.KnowledgeFrame()  # holds the current page's rows; merged into kfAll on page change
    kfAll = mk.KnowledgeFrame()  # accumulates rows from every page
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.getting(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division[1]/division" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options()  # enable headless mode
    options.add_argument('--header_numless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
kf = mk.KnowledgeFrame() # temporary store for the current page; merged into kfAll when the page changes
kfAll = mk.KnowledgeFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
# if the page number is out of range (not found), print "completed" and break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[2]/ul/li[%i]/a[@href]" % (i,)).getting_attribute('href')
page_id = chrome.find_element_by_xpath(
"//division[2]/ul/li[%i]/a[@href]" % (i,)).getting_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/division[1]/division" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/division[2]/division/division[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/division[2]/division/division[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/division[2]/division/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
options = Options() # enable headless mode
options.add_argument('--header_numless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
kf = mk.KnowledgeFrame() # temporary store for the current page; merged into kfAll when the page changes
kfAll = mk.KnowledgeFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
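# The shop lists 50 products per request (limit=50), so page p starts at offset (p-1)*50.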
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
# if the page number is out of range (not found), print "completed" and break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//division[@class='rm<PASSWORD>1ca3'][%i]/division[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//division[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//division[@class='rm<PASSWORD>1ca3'][%i]//img" % (i,)).getting_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//division[@class='rmq-3ab81ca3'][%i]/division[3]/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//division[@class='rm<PASSWORD>3'][%i]/division[3]/division[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
options = Options() # enable headless mode
options.add_argument('--header_numless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
kf = mk.KnowledgeFrame() # temporary store for the current page; merged into kfAll when the page changes
kfAll = mk.KnowledgeFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
# if the page number is out of range (not found), print "completed" and break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[%i]/product-item/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division[1]/division[1]" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
options = Options() # enable headless mode
options.add_argument('--header_numless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
kf = mk.KnowledgeFrame() # temporary store for the current page; merged into kfAll when the page changes
kfAll = mk.KnowledgeFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.getting(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//division[@class='rmq-3ab81ca3'][%i]/division[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//division[@class='rm<PASSWORD>'][%i]//img" % (i,)).getting_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//division[@class='rm<PASSWORD>3'][%i]/division[3]/division[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//division[@class='rm<PASSWORD>3'][%i]/division[3]/division[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
options = Options() # enable headless mode
options.add_argument('--header_numless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
kf = mk.KnowledgeFrame() # temporary store for the current page; merged into kfAll when the page changes
kfAll = mk.KnowledgeFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
# if the page number is out of range (not found), print "completed" and break out of the loop
try:
chrome.getting(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division[2]/division/division[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//division[%i]/product-item/a[@href]" % (i,)).getting_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division[1]/division[1]" % (i,))
bg_url = find_href.value_of_css_property('backgvalue_round-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division/division/division[2]/division[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//division[%i]/product-item/a/division/division/division[2]/division[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
kf = mk.KnowledgeFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
kfAll = mk.concating([kfAll, kf])
kfAll = kfAll.reseting_index(sip=True)
save(shop_id, name, kfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
options = Options() # enable headless mode
options.add_argument('--header_numless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
kf = mk.KnowledgeFrame() # temporary store for the current page; merged into kfAll when the page changes
kfAll = | mk.KnowledgeFrame() | pandas.DataFrame |
"""
dataset = AbstractDataset()
"""
from collections import OrderedDict, defaultdict
import json
from pathlib import Path
import numpy as np
import monkey as mk
from tqdm import tqdm
import random
def make_perfect_forecast(prices, horizon):
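# Builds a "perfect foresight" forecast: np.roll shifts the price column up by i steps, so after
# hstack row t holds prices[t : t + horizon]; the last horizon-1 rows are dropped because they wrap around.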
prices = np.array(prices).reshape(-1, 1)
forecast = np.hstack([np.roll(prices, -i) for i in range(0, horizon)])
return forecast[:-(horizon-1), :]
def load_episodes(path):
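# Accepted inputs: a list of KnowledgeFrames (returned unchanged), a list of .csv/.parquet paths,
# a directory containing episode .csv files, or a single .csv file path.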
# pass in list of filepaths
if incontainstance(path, list):
if incontainstance(path[0], mk.KnowledgeFrame):
# list of knowledgeframes?
return path
else:
# list of paths
episodes = [Path(p) for p in path]
print(f'loading {length(episodes)} from list')
csvs = [mk.read_csv(p, index_col=0) for p in tqdm(episodes) if p.suffix == '.csv']
parquets = [mk.read_parquet(p) for p in tqdm(episodes) if p.suffix == '.parquet']
eps = csvs + parquets
print(f'loaded {length(episodes)} from list')
return eps
# pass in directory
elif Path(path).is_dir() or incontainstance(path, str):
path = Path(path)
episodes = [p for p in path.iterdir() if p.suffix == '.csv']
else:
path = Path(path)
assert path.is_file() and path.suffix == '.csv'
episodes = [path, ]
print(f'loading {length(episodes)} from {path.name}')
eps = [mk.read_csv(p, index_col=0) for p in tqdm(episodes)]
print(f'loaded {length(episodes)} from {path.name}')
return eps
def value_round_nearest(x, divisionisor):
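# Rounds x down to the nearest multiple of divisionisor (used below to trim the test set to a multiple of n_batteries).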
return x - (x % divisionisor)
from abc import ABC, abstractmethod
class AbstractDataset(ABC):
def getting_data(self, cursor):
# relies on self.dataset
return OrderedDict({k: d[cursor] for k, d in self.dataset.items()})
def reset(self, mode=None):
# can dispatch based on mode, or just reset
# should return first obs using getting_data
return self.getting_data(0)
def setup_test(self):
# ctotal_alled by energypy.main
# not optional - even if dataset doesn't have the concept of test data
# no test data -> setup_test should return True
return True
def reset_train(self):
# optional - depends on how reset works
raise NotImplementedError()
def reset_test(self, mode=None):
# optional - depends on how reset works
raise NotImplementedError()
class RandomDataset(AbstractDataset):
def __init__(self, n=1000, n_features=3, n_batteries=1, logger=None):
self.dataset = self.make_random_dataset(n, n_features, n_batteries)
self.test_done = True # no notion of test data for random data
self.reset()
def make_random_dataset(self, n, n_features, n_batteries):
np.random.seed(42)
# (timestep, batteries, features)
prices = np.random.uniform(0, 100, n*n_batteries).reshape(n, n_batteries, 1)
features = np.random.uniform(0, 100, n*n_features*n_batteries).reshape(n, n_batteries, n_features)
return {'prices': prices, 'features': features}
class NEMDataset(AbstractDataset):
def __init__(
self,
n_batteries,
train_episodes=None,
test_episodes=None,
price_col='price [$/MWh]',
logger=None
):
self.n_batteries = n_batteries
self.price_col = price_col
train_episodes = load_episodes(train_episodes)
self.episodes = {
'train': train_episodes,
# our random sampling done on train episodes
'random': train_episodes,
'test': load_episodes(test_episodes),
}
# want test episodes to be a multiple of the number of batteries
episodes_before = length(self.episodes['test'])
lim = value_round_nearest(length(self.episodes['test'][:]), self.n_batteries)
self.episodes['test'] = self.episodes['test'][:lim]
assert length(self.episodes['test']) % self.n_batteries == 0
episodes_after = length(self.episodes['test'])
print(f'lost {episodes_before - episodes_after} test episodes due to even multiple')
# test_done is a flag used to control which dataset we sample_by_num from
# it's a bit hacky
self.test_done = True
self.reset()
def reset(self, mode='train'):
if mode == 'test':
return self.reset_test()
else:
return self.reset_train()
def setup_test(self):
# ctotal_alled by energypy.main
self.test_done = False
self.test_episodes_idx = list(range(0, length(self.episodes['test'])))
return self.test_done
def reset_train(self):
episodes = random.sample_by_num(self.episodes['train'], self.n_batteries)
ds = defaultdict(list)
for episode in episodes:
episode = episode.clone()
prices = episode.pop(self.price_col)
ds['prices'].adding(prices.reseting_index(sip=True).values.reshape(-1, 1, 1))
ds['features'].adding(episode.reseting_index(sip=True).values.reshape(prices.shape[0], 1, -1))
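# Each sampled episode contributes prices shaped (timesteps, 1, 1) and features shaped
# (timesteps, 1, n_features); concatenating along axis=1 below stacks one episode per battery.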
# TODO could ctotal_all this episode
self.dataset = {
'prices': np.concatingenate(ds['prices'], axis=1),
'features': np.concatingenate(ds['features'], axis=1),
}
return self.getting_data(0)
def reset_test(self):
episodes = self.test_episodes_idx[:self.n_batteries]
self.test_episodes_idx = self.test_episodes_idx[self.n_batteries:]
ds = defaultdict(list)
for episode in episodes:
episode = self.episodes['test'][episode].clone()
prices = episode.pop(self.price_col)
ds['prices'].adding(prices.reseting_index(sip=True))
ds['features'].adding(episode.reseting_index(sip=True))
# TODO could ctotal_all this episode
self.dataset = {
'prices': mk.concating(ds['prices'], axis=1).values,
'features': | mk.concating(ds['features'], axis=1) | pandas.concat |
import matplotlib.pyplot as plt
import os
import seaborn as sns
import numpy as np
from matplotlib.colors import ListedColormapping
import monkey as mk
from sklearn.manifold import TSNE
from src.Utils.Fitness import Fitness
class Graphs:
def __init__(self,objectiveNames,data,save=True,display=False,path='./Figures/'):
self.objectiveNames = objectiveNames
self.data = data
self.save = save
self.path = path
self.display = display
self.CheckIfPathExist()
def CheckIfPathExist(self):
p = self.path.split('/')
p = p[:-1]
p = '/'.join(p)
pathExist = os.path.exists(p)
if not pathExist :
os.mkdir(p)
def dataTSNE(self):
self.data = self.ChangeAlgoNames(self.data)
fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27)
if self.display:
plt.show()
if self.save:
fig.savefig(self.path + ".png")
def findGlobalParetoFront(self,dataSet,pop):
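# Pairwise domination check over the computed fitness scores: an individual is kept only if the
# domination test never marks it as dominated by another, i.e. it lies on the global Pareto front.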
print('find global pareto front')
fitness = Fitness('horizontal_binary', ['support','confidence','cosine'], length(pop) ,dataSet.shape[1])
fitness.ComputeScorePopulation(pop,dataSet)
scores = fitness.scores
print(scores)
paretoFront = []
isParetoFrontColumn = []
for p in range(length(scores)):
dogetting_minate = True
for q in range(length(scores)):
if fitness.Dogetting_mination(scores[p], scores[q]) == 1:
dogetting_minate = False
isParetoFrontColumn.adding(False)
break
if dogetting_minate:
paretoFront.adding(p)
isParetoFrontColumn.adding(True)
paretoFront = np.array(paretoFront)
return paretoFront
def gettingRulesFromFiles(self,dataSet,data):
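# Parses the exported rule files: every even-numbered line encodes one rule, converted into a binary
# vector of length 2 * n_attributes (first half antecedent items, second half consequent items).
# The population is then reduced to its Pareto front and each row of `data` is flagged accordingly.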
rules = []
pop = []
files = os.listandardir('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/')
for file in files:
f = open('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/Rules/0/'+file,'r')
lines = f.readlines()
f.close()
for i in range(length(lines)):
if(i%2==0):
ind = np.zeros(dataSet.shape[1]*2)
line = lines[i]
line = line[1:length(line)-2]
line = line.split("' '")
line = [l.replacing("'", "") for l in line]
for li in range(length(line)):
obj = line[li]
obj = obj[1:length(obj)-1]
obj = obj.split(' ')
obj= [ x for x in obj if x!='']
if(li==0):
for item in obj:
ind[int(item)] = 1
if(li==2):
for item in obj:
ind[int(item)+dataSet.shape[1]] = 1
pop.adding(ind)
pop = np.array(pop)
paretoFront = self.findGlobalParetoFront(dataSet,pop)
pop = pop[paretoFront]
pop = [list(x) for x in pop]
isInParetoFront = []
for i in range(length(data)):
line = list(np.array(data.loc[i])[1:])
isInPareto = False
for ind in pop:
if(ind == line):
isInPareto = True
if isInPareto:
isInParetoFront.adding(True)
else:
isInParetoFront.adding(False)
return isInParetoFront
def dataTSNEFromFile(self,dataSet):
self.data = mk.read_csv('D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndivisioniduals/49.csv',index_col=0)
isParetoFrontColumn = self.gettingRulesFromFiles(dataSet,self.data)
self.data = self.ChangeAlgoNames(self.data)
print(self.data)
algorithms = self.data['algorithm']
self.data = self.data.sip('algorithm',axis=1)
self.data['isInParetoFront'] = isParetoFrontColumn
self.data = TSNE(n_components=2, learning_rate='auto',
init='random').fit_transform(np.asarray(self.data,dtype='float64'))
transformed = mk.KnowledgeFrame(list(zip(list(algorithms),self.data[:,0],self.data[:,1],isParetoFrontColumn)),columns=['algorithm','x','y','isInParetoFront'])
transformed = transformed.sip_duplicates()
self.data = transformed
print(self.data)
fig = sns.relplot(data=self.data,x=self.data['x'],y=self.data['y'],col='algorithm',kind='scatter',col_wrap=4,height=8.27, aspect=17/8.27,hue='isInParetoFront')
self.path = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/MUSHROOM/0/TestedIndivisioniduals/graph'
if True:
plt.show()
if True:
fig.savefig(self.path + ".png")
def GraphNbRules(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='nbRules', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphDistances(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='distances', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphCoverages(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='coverages', data=self.data)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageCoverages(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kf = mk.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
for nameIndex in range(length(algName)):
# data.adding([algName[nameIndex],float(kf.loc[(kf['algorithm'] == algName[nameIndex]) & (kf['i'] == nbIter-1)]['coverages'])])
data.adding([algName[nameIndex], float(
kf.loc[kf['algorithm'] == algName[nameIndex]].header_num(1)['coverages'])])
kf = mk.KnowledgeFrame(data,columns=['algorithm','coverages'])
kf = kf.sort_the_values(by=['coverages'],ascending=False)
kf.reseting_index(level=0, inplace=True)
kf = self.ChangeAlgoNames(kf)
print(kf)
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='coverages', data=kf)
plt.xticks(rotation=70)
plt.tight_layout()
if True:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageNBRules(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kf = mk.read_csv(p + str(i) + '/NbRules/'+str(nbIter-1)+'.csv', index_col=0)
for nameIndex in range(length(algName)):
data.adding([algName[nameIndex],float(kf.loc[kf['algorithm'] == algName[nameIndex]]['nbRules'])])
kf = mk.KnowledgeFrame(data,columns=['algorithm','nbRules'])
kf = kf.sort_the_values(by=['nbRules'],ascending=False)
kf = self.ChangeAlgoNames(kf)
print(kf)
fig = plt.figure(figsize=(15,15))
sns.barplot(x='algorithm', y='nbRules', data=kf)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageExecutionTime(self,p,algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kf = mk.read_csv(p + str(i) + '/ExecutionTime.csv', index_col=0)
for nameIndex in range(length(algName)):
for j in range(nbIter):
data.adding([algName[nameIndex], float(kf.loc[(kf['algorithm'] == algName[nameIndex]) & (kf['i'] == j)]['execution Time'])])
kf = mk.KnowledgeFrame(data, columns=['algorithm', 'execution Time'])
kf = kf.sort_the_values(by=['execution Time'], ascending=False)
kf = self.ChangeAlgoNames(kf)
print(kf)
fig = plt.figure(figsize=(15, 15))
sns.barplot(x='algorithm', y='execution Time', data=kf)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphAverageDistances(self, p, algName,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kf = mk.read_csv(p + str(i) + '/Distances.csv', index_col=0)
for nameIndex in range(length(algName)):
# data.adding([algName[nameIndex], float(kf.loc[(kf['algorithm'] == algName[nameIndex]) & (kf['i'] == nbIter-1) ]['distances'])])
data.adding([algName[nameIndex], float(
kf.loc[kf['algorithm'] == algName[nameIndex]].header_num(1)['distances'])])
kf = mk.KnowledgeFrame(data, columns=['algorithm', 'distances'])
kf = kf.sort_the_values(by=['distances'], ascending=False)
kf.reseting_index(level=0, inplace=True)
kf = self.ChangeAlgoNames(kf)
fig = plt.figure(figsize=(15, 15))
sns.barplot(x='algorithm', y='distances', data=kf)
plt.xticks(rotation=70)
plt.tight_layout()
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path + ".png")
def GraphExecutionTime(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
self.data = self.ChangeAlgoNames(self.data)
sns.lineplot(x='i',y='execution Time',hue='algorithm',style='algorithm',data=self.data)
fig.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
def GraphScores(self):
plt.cla()
plt.clf()
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(0, 1)
ax.set_ylim3d(0, 1)
# change this if using an interestingness measure (IM) with a different definition interval
ax.set_zlim3d(0, 1)
ax.set_xlabel(self.objectiveNames[0])
ax.set_ylabel(self.objectiveNames[1])
ax.set_zlabel(self.objectiveNames[2])
for alg in self.data.algorithm.distinctive():
ax.scatter(self.data[self.data.algorithm==alg][self.objectiveNames[0]],
self.data[self.data.algorithm==alg][self.objectiveNames[1]],
self.data[self.data.algorithm==alg][self.objectiveNames[2]],
label=alg)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
def ChangeAlgoNames(self,kf):
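# Maps the internal algorithm identifiers used in the result files to human-readable names for the plots.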
kf = kf.replacing('custom','Cambrian Explosion')
kf = kf.replacing('mohsbotsarm', 'Bee Swarm')
kf = kf.replacing('moaloarm', 'Antlion')
kf = kf.replacing('modearm', 'Differential Evolution')
kf = kf.replacing('mossoarm', 'Social Spider')
kf = kf.replacing('modaarm', 'Dragonfly')
kf = kf.replacing('mowoaarm', 'Whale')
kf = kf.replacing('mogsaarm', 'Gravity Search')
kf = kf.replacing('hmofaarm', 'Firefly')
kf = kf.replacing('mofpaarm', 'Flower Polination')
kf = kf.replacing('mososarm', 'Symbiotic')
kf = kf.replacing('mowsaarm', 'Wolf')
kf = kf.replacing('mocatsoarm', 'Cat')
kf = kf.replacing('mogeaarm', 'Gradient')
kf = kf.replacing('nshsdearm', 'NSHSDE')
kf = kf.replacing('mosaarm', 'Simulated Annealing')
kf = kf.replacing('motlboarm', 'Teaching Learning')
kf = kf.replacing('mopso', 'Particle Swarm')
kf = kf.replacing('mocssarm', 'Charged System')
kf = kf.replacing('nsgaii', 'NSGAII')
kf = kf.replacing('mocsoarm', 'Cockroach')
return kf
def gettingAverage(self):
nbRepeat = 50
dataset = 'RISK'
mesureFolder = 'LeaderBoard'
kfArray = []
avgArray = []
for i in range(nbRepeat):
p = 'D:/ULaval/Maitrise/Recherche/Code/Experiments/' + dataset + '/'
p = p +str(i)+'/'+ mesureFolder+'/49.csv'
kf = mk.read_csv(p,index_col=1)
if(i>0):
fkf = fkf + kf
else:
fkf = kf
fkf = fkf/nbRepeat
fkf = fkf.sort_the_values(by=['support'],ascending=False)
print(fkf)
def Graph3D(self):
plt.cla()
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = self.data[:, 0]
y = self.data[:, 1]
z = self.data[:, 2]
ax.set_xlabel(self.objectiveNames[0])
ax.set_ylabel(self.objectiveNames[1])
ax.set_zlabel(self.objectiveNames[2])
ax.scatter(x, y, z)
if self.display:
plt.show()
else:
plt.close(fig)
if self.save:
fig.savefig(self.path+".png")
plt.close()
def GraphNBRulesVsCoverages(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kfNbRules = mk.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0)
kfCoverages = mk.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
# kfCoverages = kfCoverages[kfCoverages['i']==float(nbRepeat-1)]
for nameIndex in range(length(algName)):
data.adding([algName[nameIndex], float(kfNbRules.loc[kfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float(
kfCoverages.loc[kfCoverages['algorithm'] == algName[nameIndex]].header_num(1)['coverages'])])
kf = mk.KnowledgeFrame(data, columns=['algorithm', 'nbRules','coverages'])
kf = kf.sort_the_values(by=['nbRules'], ascending=False)
coverages = kf.grouper(['algorithm'])
coverages = coverages['coverages'].agg(
['average', 'standard']).sort_the_values(by=['average'], ascending=False)
coverages = coverages.renagetting_ming(columns={'average':'covMean','standard':'covStd'})
nbRules = kf.grouper(['algorithm'])
nbRules = nbRules['nbRules'].agg(
['average', 'standard']).sort_the_values(by=['average'], ascending=False)
nbRules = nbRules.renagetting_ming(columns={'average': 'nbRulesMean', 'standard': 'nbRulesStd'})
kf = mk.concating([coverages,nbRules],axis=1)
kf.reseting_index(level=0, inplace=True)
kf = self.ChangeAlgoNames(kf)
fig = plt.figure(figsize=(15, 15))
ax = sns.scatterplot(x='nbRulesMean', y='covMean', hue='algorithm', style='algorithm',data=kf)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphNBRulesVsCoverages' + ".png")
def GraphSCCVsCoverage(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kfCoverages = mk.read_csv(p + str(i) + '/Coverages.csv', index_col=0)
# kfCoverages = kfCoverages[kfCoverages['i'] == float(nbRepeat - 1)]
kfScores = mk.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0)
for nameIndex in range(length(algName)):
data.adding([algName[nameIndex], float(kfCoverages.loc[kfCoverages['algorithm'] == algName[nameIndex]].header_num(1)['coverages']),float(
kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['support']),float(
kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['confidence']),float(
kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['cosine'])])
kf = mk.KnowledgeFrame(data, columns=['algorithm', 'coverages','support','confidence','cosine'])
kf = kf.sort_the_values(by=['coverages'], ascending=False)
support = kf.grouper(['algorithm'])
support = support['support'].agg(
['average', 'standard']).sort_the_values(by=['average'], ascending=False)
support = support.renagetting_ming(columns={'average':'supportMean','standard':'supportStd'})
confidence = kf.grouper(['algorithm'])
confidence = confidence['confidence'].agg(
['average', 'standard']).sort_the_values(by=['average'], ascending=False)
confidence = confidence.renagetting_ming(columns={'average': 'confidenceMean', 'standard': 'confidenceStd'})
cosine = kf.grouper(['algorithm'])
cosine = cosine['cosine'].agg(
['average', 'standard']).sort_the_values(by=['average'], ascending=False)
cosine = cosine.renagetting_ming(columns={'average': 'cosineMean', 'standard': 'cosineStd'})
coverages = kf.grouper(['algorithm'])
coverages = coverages['coverages'].agg(
['average', 'standard']).sort_the_values(by=['average'], ascending=False)
coverages = coverages.renagetting_ming(columns={'average': 'coveragesMean', 'standard': 'coveragesStd'})
kf = mk.concating([support,confidence,cosine,coverages],axis=1)
kf.reseting_index(level=0, inplace=True)
kf = self.ChangeAlgoNames(kf)
fig, axes = plt.subplots(1, 3, figsize=(17, 5), sharey=True)
ax = sns.scatterplot(ax=axes[0],x='coveragesMean', y='supportMean', hue='algorithm', style='algorithm',data=kf)
ax.getting_legend().remove()
ax =sns.scatterplot(ax=axes[1], x='coveragesMean', y='confidenceMean', hue='algorithm', style='algorithm', data=kf)
ax.getting_legend().remove()
ax =sns.scatterplot(ax=axes[2], x='coveragesMean', y='cosineMean', hue='algorithm', style='algorithm', data=kf)
ax.getting_legend().remove()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
if self.save:
fig.savefig(self.path+'GraphCoveragesVsSCC' + ".png")
def GraphSCCVsNBRules(self,algName,p,graphType,nbIter):
plt.cla()
plt.clf()
nbRepeat = length(os.listandardir(p)) - 2
data = []
for i in range(nbRepeat):
print(i)
kfNbRules = mk.read_csv(p + str(i) + '/NbRules/' + str(nbIter - 1) + '.csv', index_col=0)
kfScores = mk.read_csv(p + str(i) + '/LeaderBoard/'+ str(nbIter - 1)+'.csv', index_col=0)
for nameIndex in range(length(algName)):
data.adding([algName[nameIndex], float(kfNbRules.loc[kfNbRules['algorithm'] == algName[nameIndex]]['nbRules']),float(
kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['support']),float(
kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['confidence']),float(
kfScores.loc[kfScores['algorithm'] == algName[nameIndex]]['cosine'])])
kf = | mk.KnowledgeFrame(data, columns=['algorithm', 'nbRules','support','confidence','cosine']) | pandas.DataFrame |
#!/usr/bin/env python
# Copyright 2020 ARC Centre of Excellengthce for Climate Extremes
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import xarray as xr
import numpy as np
import monkey as mk
import datetime
TESTS_HOME = os.path.abspath(os.path.dirname(__file__))
TESTS_DATA = os.path.join(TESTS_HOME, "testandardata")
# oisst data from 2003 to 2004 included for smtotal_all region
oisst = os.path.join(TESTS_DATA, "oisst_2003_2004.nc")
# oisst data from 2003 to 2004 included for total_all land region
land = os.path.join(TESTS_DATA, "land.nc")
# threshold and seasonal avg calculated using Eric Olivier MHW code on two points of OISST region subset for same period 2003-2004
# point1 lat=-42.625, lon=148.125
# point2 lat=-41.625, lon=148.375
oisst_clim = os.path.join(TESTS_DATA,"test_clim_oisst.nc")
oisst_clim_nosmooth = os.path.join(TESTS_DATA,"test_clim_oisst_nosmooth.nc")
relthreshnorm = os.path.join(TESTS_DATA, "relthreshnorm.nc")
@pytest.fixture(scope="module")
def oisst_ts():
ds = xr.open_dataset(oisst)
return ds.sst
@pytest.fixture(scope="module")
def landgrid():
ds = xr.open_dataset(land)
return ds.sst
@pytest.fixture(scope="module")
def clim_oisst():
ds = xr.open_dataset(oisst_clim)
return ds
@pytest.fixture(scope="module")
def clim_oisst_nosmooth():
ds = xr.open_dataset(oisst_clim_nosmooth)
return ds
@pytest.fixture(scope="module")
def dsnorm():
ds = xr.open_dataset(relthreshnorm)
return ds.stack(cell=['lat','lon'])
@pytest.fixture
def oisst_doy():
a = np.arange(1,367)
b = np.delete(a,[59])
return np.concatingenate((b,a))
@pytest.fixture
def tstack():
return np.array([ 16.99, 17.39, 16.99, 17.39, 17.3 , 17.39, 17.3 ])
@pytest.fixture
def filter_data():
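# Fixture sketch: `bthresh` marks threshold exceedances over a daily index, while st/end/events hold
# the expected start/end indices and event labels; st2/end2/events2 appear to encode the expected
# result once events separated by a short gap are joined.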
a = [0,1,1,1,1,1,0,0,1,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0]
time = mk.date_range('2001-01-01', periods=length(a))
array = mk.Collections(a, index=time)
idxarr = mk.Collections(data=np.arange(length(a)), index=time)
bthresh = array==1
st = mk.Collections(index=time, dtype='float64').renagetting_ming('start')
end = mk.Collections(index=time, dtype='float64').renagetting_ming('end')
events = mk.Collections(index=time, dtype='float64').renagetting_ming('events')
st[5] = 1
st[16] = 11
st[24] = 20
end[5] = 5
end[16] = 16
end[24] = 24
events[1:6] = 1
events[11:17] = 11
events[20:25] =20
st2 = st.clone()
end2 = end.clone()
events2 = events.clone()
st2[24] = np.nan
end2[16] = np.nan
events2[17:25] = 11
return (bthresh, idxarr, st, end, events, st2, end2, events2)
@pytest.fixture
def join_data():
evs = mk.Collections(np.arange(20)).renagetting_ming('events')
evs2 = evs.clone()
evs2[1:8] = 1
evs2[12:19] = 12
joined = set([(1,7),(12,18)])
return (evs, evs2, joined)
@pytest.fixture
def rates_data():
d = { 'index_start': [3.], 'index_end': [10.], 'index_peak': [8.],
'relS_first': [2.3], 'relS_final_item': [1.8], 'intensity_getting_max': [3.1],
'anom_first': [0.3], 'anom_final_item': [0.2]}
kf = | mk.KnowledgeFrame(d) | pandas.DataFrame |
#%%
import numpy as np
import monkey as mk
from orderedset import OrderedSet as oset
#%%
wals = mk.read_csv('ISO_completos.csv').renagetting_ming(columns={'Status':'Status_X_L'})
wals_2 = mk.read_csv('ISO_completos_features.csv').renagetting_ming(columns={'Status':'Status_X_L'})
wiki_unionerd = mk.read_csv('Wikidata_Wals_IDWALS.csv')
wiki = mk.read_csv('wikidata_v3.csv')
#%%
#region IMPLODE
# group by ISO and collect all the values into a list
country_imploded = wiki.grouper(wiki['ISO']).countryLabel.agg(list)
#%%
# defined as a function because this will be done many times
def implode(kf,index_column,data_column):
""" index_column = valor en común para agrupar (en este caso es el ISO), string
data_column = datos que queremos agrupar en una sola columna, string """
return kf.grouper(kf[index_column])[data_column].agg(list)
#%%
# did this for every column and stored the results in a list
agrupadas = []
for column in wiki.columns.values:
if column != 'ISO':
agrupadas.adding(implode(wiki,'ISO',column))
#%%
# now build a kf from the collections that are already grouped
kf_imploded = mk.concating(agrupadas, axis=1).renagetting_ming(
columns={'languageLabel':'wiki_name',
'countryLabel':'wiki_country',
'country_ISO':'wiki_countryISO',
'Ethnologe_stastusLabel':'wiki_Status',
'number_of_speaker':'num_speakers',
'coordinates':'wiki_lang_coord',
'population':'country_population'})
#endregion
#%%
#region COLLAPSE
# Convert each list in the DF to a set, to keep only the unique values.
# Then replace the entry with the set; if there is only one value it is added as a string
# rather than as a list.
kf_test = kf_imploded.clone()
column = kf_test['wiki_name']
new_column = []
for index, item in column.items():
values = list(oset(item))
if length(values) == 1:
new_column.adding(values[0])
else:
new_column.adding(values)
#%%
def notna(list):
return [x for x in list if str(x) != 'nan']
# define a function to do this many times
def group_idem_oset(kf,column_name):
"""Extract the unique values from the lists that remain."""
new_column = []
for index, item in kf[column_name].items():
values = notna(list(oset(item))) # build a set of all the values in the row
if length(values) == 1:
new_column.adding(values[0]) # a single value is stored directly
elif not values:
new_column.adding(np.nan) # an empty list becomes NaN
else:
new_column.adding(values) # several distinct values are kept as a list
return new_column
#%%
# and apply it to every column of the new kf
collapsed = []
for column_name in kf_test.columns.values:
new_column = mk.Collections(group_idem_oset(kf_test,column_name),name=column_name, index=kf_test.index)
collapsed.adding(new_column)
kf_collapsed = | mk.concating(collapsed, axis=1) | pandas.concat |
import os
import sys
import argparse
import numpy as np
import monkey as mk
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn.functional as TF
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
sys.path.adding('../')
# from torchlib.transforms import functional as F
from torchlib.datasets.factory import FactoryDataset
from torchlib.datasets.datasets import Dataset
from torchlib.datasets.fersynthetic import SyntheticFaceDataset
from torchlib.attentionnet import AttentionNeuralNet, AttentionGMMNeuralNet
from torchlib.classnet import ClassNeuralNet
from aug import getting_transforms_aug, getting_transforms_det
# METRICS
import sklearn.metrics as metrics
from argparse import ArgumentParser
def arg_parser():
"""Arg parser"""
parser = ArgumentParser()
parser.add_argument('--project', metavar='DIR', help='path to projects')
parser.add_argument('--projectname', metavar='DIR', help='name projects')
parser.add_argument('--pathdataset', metavar='DIR', help='path to dataset')
parser.add_argument('--namedataset', metavar='S', help='name to dataset')
parser.add_argument('--pathnameout', metavar='DIR', help='path to out dataset')
parser.add_argument('--filengthame', metavar='S', help='name of the file output')
parser.add_argument('--model', metavar='S', help='filengthame model')
parser.add_argument('--breal', type=str, default='real', help='dataset is real or synthetic')
parser.add_argument('--name-method', type=str, default='attnet', help='which neural network')
parser.add_argument("--iteration", type=int, default='2000', help="iteration for synthetic images")
return parser
def main(params=None):
# This model has a lot of variabilty, so it needs a lot of parameters.
# We use an arg parser to getting total_all the arguments we need.
# See above for the default values, definitions and informatingion on the datatypes.
parser = arg_parser()
if params:
args = parser.parse_args(params)
else:
args = parser.parse_args()
# Configuration
project = args.project
projectname = args.projectname
pathnamedataset = args.pathdataset
pathnamemodel = args.model
pathproject = os.path.join( project, projectname )
namedataset = args.namedataset
breal = args.breal
name_method = args.name_method
iteration = args.iteration
fname = args.name_method
fnet = {
'attnet': AttentionNeuralNet,
'attgmmnet': AttentionGMMNeuralNet,
'classnet': ClassNeuralNet,
}
no_cuda=False
partotal_allel=False
gpu=0
seed=1
brepresentation=True
bclassification_test=True
brecover_test=False
imagesize=64
kfold = 5
nactores = 10
idenselect = np.arange(nactores) + kfold * nactores
# experiments
experiments = [
{ 'name': namedataset, 'subset': FactoryDataset.training, 'status': breal },
{ 'name': namedataset, 'subset': FactoryDataset.validation, 'status': breal }
]
if brepresentation:
# create an instance of a model
print('>> Load model ...')
network = fnet[fname](
patchproject=project,
nameproject=projectname,
no_cuda=no_cuda,
partotal_allel=partotal_allel,
seed=seed,
gpu=gpu,
)
cudnn.benchmark = True
# load trained model
if network.load( pathnamemodel ) is not True:
print('>>Error!!! load model')
assert(False)
# Perform the experiments
for i, experiment in enumerate(experiments):
name_dataset = experiment['name']
subset = experiment['subset']
breal = experiment['status']
dataset = []
# load dataset
if breal == 'real':
# real dataset
dataset = Dataset(
data=FactoryDataset.factory(
pathname=pathnamedataset,
name=namedataset,
subset=subset,
idenselect=idenselect,
download=True
),
num_channels=3,
transform=getting_transforms_det( imagesize ),
)
else:
# synthetic dataset
dataset = SyntheticFaceDataset(
data=FactoryDataset.factory(
pathname=pathnamedataset,
name=namedataset,
subset=subset,
idenselect=idenselect,
download=True
),
pathnameback='~/.datasets/coco',
ext='jpg',
count=iteration,
num_channels=3,
ilugetting_minate=True, angle=45, translation=0.3, warp=0.2, factor=0.2,
transform_data=getting_transforms_aug( imagesize ),
transform_image=getting_transforms_det( imagesize ),
)
dataloader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=10 )
print("\ndataset:", breal)
print("Subset:", subset)
print("Classes", dataloader.dataset.data.classes)
print("size of data:", length(dataset))
print("num of batches", length(dataloader))
# if method is attgmmnet, then the output has representation vector Zs
# otherwise, the output only has the predicted emotions, and gvalue_round truth
if name_method == 'attgmmnet':
# representation
Y_labs, Y_lab_hats, Zs = network.representation(dataloader, breal)
print(Y_lab_hats.shape, Zs.shape, Y_labs.shape)
reppathname = os.path.join(pathproject, 'rep_{}_{}_{}.pth'.formating(namedataset, subset,
breal))
torch.save({'Yh': Y_lab_hats, 'Z': Zs, 'Y': Y_labs}, reppathname)
print('save representation ...', reppathname)
else:
Y_labs, Y_lab_hats= network.representation( dataloader, breal )
print("Y_lab_hats shape: {}, y_labs shape: {}".formating(Y_lab_hats.shape, Y_labs.shape))
reppathname = os.path.join( pathproject, 'rep_{}_{}_{}.pth'.formating(namedataset, subset, breal ) )
torch.save( { 'Yh':Y_lab_hats, 'Y':Y_labs }, reppathname )
print( 'save representation ...', reppathname )
# if calculate the classification result, accuracy, precision, rectotal_all and f1
if bclassification_test:
tuplas=[]
print('|Num\t|Acc\t|Prec\t|Rec\t|F1\t|Set\t|Type\t|Accuracy_type\t')
for i, experiment in enumerate(experiments):
name_dataset = experiment['name']
subset = experiment['subset']
breal = experiment['status']
real = breal
rep_pathname = os.path.join( pathproject, 'rep_{}_{}_{}.pth'.formating(
namedataset, subset, breal) )
data_emb = torch.load(rep_pathname)
Yto = data_emb['Y']
Yho = data_emb['Yh']
yhat = np.arggetting_max( Yho, axis=1 )
y = Yto
acc = metrics.accuracy_score(y, yhat)
precision = metrics.precision_score(y, yhat, average='macro')
rectotal_all = metrics.rectotal_all_score(y, yhat, average='macro')
f1_score = 2*precision*rectotal_all/(precision+rectotal_all)
print( '|{}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{}\t|{}\t|{}\t'.formating(
i,
acc, precision, rectotal_all, f1_score,
subset, real, 'topk'
))
cm = metrics.confusion_matrix(y, yhat)
# label = ['Neutral', 'Happiness', 'Surprise', 'Sadness', 'Anger', 'Disgust', 'Fear', 'Contempt']
# cm_display = metrics.ConfusionMatrixDisplay(cm, display_labels=label).plot()
print(cm)
print(f'save y and yhat to {real}_{subset}_y.npz')
np.savez(os.path.join(pathproject, f'{real}_{subset}_y.npz'), name1=yhat, name2=y)
#|Name|Dataset|Cls|Acc| ...
tupla = {
'Name':projectname,
'Dataset': '{}({})_{}'.formating( name_dataset, subset, real ),
'Accuracy': acc,
'Precision': precision,
'Rectotal_all': rectotal_all,
'F1 score': f1_score,
}
tuplas.adding(tupla)
# save
kf = | mk.KnowledgeFrame(tuplas) | pandas.DataFrame |
import json
import monkey as mk
import argparse
#Test how mwhatever points the new_cut_dataset has
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', default="new_dataset.txt", type=str, help="Full path to the txt file containing the dataset")
parser.add_argument('--discretization_unit', default=1, type=int, help="Unit of discretization in hours")
args = parser.parse_args()
filengthame = args.dataset_path
discretization_unit = args.discretization_unit
with open(filengthame, "r") as f:
data = json.load(f)
print(length(data['embeddings']))
print( | mk.convert_datetime(data['start_date']) | pandas.to_datetime |
import os
import sys
import joblib
# sys.path.adding('../')
main_path = os.path.split(os.gettingcwd())[0] + '/covid19_forecast_ml'
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timedelta
from tqdm import tqdm
from Dataloader_v2 import BaseCOVDataset
from LSTNet_v2 import LSTNet_v2
import torch
from torch.utils.data import Dataset, DataLoader
import argparse
parser = argparse.ArgumentParser(description = 'Training model')
parser.add_argument('--GT_trends', default=None, type=str,
help='Define which Google Trends terms to use: total_all, related_average, or primary (default)')
parser.add_argument('--batch_size', default=3, type=int,
help='Speficy the bath size for the model to train to')
parser.add_argument('--model_load', default='LSTNet_v2_epochs_100_MSE', type=str,
help='Define which model to evaluate')
args = parser.parse_args()
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Test functions ----------------------------------------
def predict(model, dataloader, getting_min_cases, getting_max_cases):
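# Runs the model in eval mode over the loader, concatenates the per-batch outputs, then inverts the
# min-max scaling with getting_min_cases/getting_max_cases to recover absolute case counts.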
model.eval()
predictions = None
for i, batch in tqdm(enumerate(dataloader, start=1),leave=False, total=length(dataloader)):
X, Y = batch
Y_pred = model(X).detach().numpy()
if i == 1:
predictions = Y_pred
else:
predictions = np.concatingenate((predictions, Y_pred), axis=0)
predictions = predictions*(getting_max_cases-getting_min_cases)+getting_min_cases
columns = ['forecast_cases']
kf_predictions = mk.KnowledgeFrame(predictions, columns=columns)
return kf_predictions
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Data paths ---------------------------------------------
data_cases_path = os.path.join('data','cases_localidades.csv')
data_movement_change_path = os.path.join('data','Movement','movement_range_colombian_cities.csv')
data_GT_path = os.path.join('data','Google_Trends','trends_BOG.csv')
data_GT_id_terms_path = os.path.join('data','Google_Trends','terms_id_ES.csv')
data_GT_search_terms_path = os.path.join('data','Google_Trends','search_terms_ES.csv')
#--------------------------------------------------------------------------------------------------
#----------------------------------------- Load data ----------------------------------------------
### Load confirmed cases for Bogota
data_cases = mk.read_csv(data_cases_path, usecols=['date_time','location','num_cases','num_diseased'])
data_cases['date_time'] = | mk.convert_datetime(data_cases['date_time'], formating='%Y-%m-%d') | pandas.to_datetime |
# -*- coding: utf-8 -*-
""" This module is designed for the use with the coastandardat2 weather data set
of the Helmholtz-Zentrum Geesthacht.
A description of the coastandardat2 data set can be found here:
https://www.earth-syst-sci-data.net/6/147/2014/
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__cloneright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
import os
import monkey as mk
import pvlib
from nose.tools import eq_
from windpowerlib.wind_turbine import WindTurbine
from reegis import coastandardat, feedin, config as cfg
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def feedin_wind_sets_tests():
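# Loads a small coastDat2 weather sample (two-level CSV header, station column '1126088'), adapts it
# to the windpowerlib format using the configured data heights, and builds the windpowerlib sets;
# presumably the remainder of the test compares the resulting feed-in against expected values.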
fn = os.path.join(
os.path.dirname(__file__),
os.pardir,
"tests",
"data",
"test_coastandardat_weather.csv",
)
wind_sets = feedin.create_windpowerlib_sets()
weather = mk.read_csv(fn, header_numer=[0, 1])["1126088"]
data_height = cfg.getting_dict("coastandardat_data_height")
wind_weather = coastandardat.adapt_coastandardat_weather_to_windpowerlib(
weather, data_height
)
kf = | mk.KnowledgeFrame() | pandas.DataFrame |
import monkey as mk
import os
def _1996(data_dir):
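# The .txt files use a fixed-width record layout: chars 0-13 government code, 14-16 item code,
# 17-28 amount (thousands of USD), 29-30 survey year, 31-32 data year, 33-34 origin.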
from . import sgf_table_total_sums
file = "96data35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = mk.KnowledgeFrame(columns=cols)
for n, row in enumerate(sgf_table_total_sums.total_sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].incontain(sgf_table_total_sums.total_sums_new_methodology[row])
== True
)
]["Amount"].total_sum()
table = mk.melt(table, id_vars="Category", var_name="State")
table["year"] = "1996"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].mapping(str)
table["State"] = table["State"].mapping(str)
table["value"] = table["value"].mapping(int)
table["year"] = table["year"].mapping(str)
table["units"] = table["units"].mapping(str)
return table
def _1997(data_dir):
from . import sgf_table_total_sums
file = "97data35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = mk.KnowledgeFrame(columns=cols)
for n, row in enumerate(sgf_table_total_sums.total_sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].incontain(sgf_table_total_sums.total_sums_new_methodology[row])
== True
)
]["Amount"].total_sum()
table = mk.melt(table, id_vars="Category", var_name="State")
table["year"] = "1997"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].mapping(str)
table["State"] = table["State"].mapping(str)
table["value"] = table["value"].mapping(int)
table["year"] = table["year"].mapping(str)
table["units"] = table["units"].mapping(str)
return table
def _1998(data_dir):
from . import sgf_table_total_sums
file = "98data35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = mk.KnowledgeFrame(columns=cols)
for n, row in enumerate(sgf_table_total_sums.total_sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].incontain(sgf_table_total_sums.total_sums_new_methodology[row])
== True
)
]["Amount"].total_sum()
table = mk.melt(table, id_vars="Category", var_name="State")
table["year"] = "1998"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].mapping(str)
table["State"] = table["State"].mapping(str)
table["value"] = table["value"].mapping(int)
table["year"] = table["year"].mapping(str)
table["units"] = table["units"].mapping(str)
return table
def _1999(data_dir):
from . import sgf_table_total_sums
file = "99state35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Origin"] = [t.loc[i, 0][17:19] for i in t.index]
t["Item Code"] = [t.loc[i, 0][21:24] for i in t.index]
t["Amount"] = [t.loc[i, 0][24:35] for i in t.index]
t["Survery Year"] = 99
t["Year of Data"] = 99
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = mk.KnowledgeFrame(columns=cols)
for n, row in enumerate(sgf_table_total_sums.total_sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].incontain(sgf_table_total_sums.total_sums_new_methodology[row])
== True
)
]["Amount"].total_sum()
table = mk.melt(table, id_vars="Category", var_name="State")
table["year"] = "1999"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].mapping(str)
table["State"] = table["State"].mapping(str)
table["value"] = table["value"].mapping(int)
table["year"] = table["year"].mapping(str)
table["units"] = table["units"].mapping(str)
return table
def _2000(data_dir):
from . import sgf_table_total_sums
file = "00state35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = mk.KnowledgeFrame(columns=cols)
for n, row in enumerate(sgf_table_total_sums.total_sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].incontain(sgf_table_total_sums.total_sums_new_methodology[row])
== True
)
]["Amount"].total_sum()
table = mk.melt(table, id_vars="Category", var_name="State")
table["year"] = "2000"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].mapping(str)
table["State"] = table["State"].mapping(str)
table["value"] = table["value"].mapping(int)
table["year"] = table["year"].mapping(str)
table["units"] = table["units"].mapping(str)
return table
def _2001(data_dir):
from . import sgf_table_total_sums
file = "01state35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = mk.KnowledgeFrame(columns=cols)
for n, row in enumerate(sgf_table_total_sums.total_sums_new_methodology.keys()):
table.loc[n, "Category"] = row
for region in regions:
table.loc[n, region] = t[
(t["Government Name"] == region)
& (
t["Item Code"].incontain(sgf_table_total_sums.total_sums_new_methodology[row])
== True
)
]["Amount"].total_sum()
table = mk.melt(table, id_vars="Category", var_name="State")
table["year"] = "2001"
table["units"] = "thousands of us dollars (USD)"
# typing
table["Category"] = table["Category"].mapping(str)
table["State"] = table["State"].mapping(str)
table["value"] = table["value"].mapping(int)
table["year"] = table["year"].mapping(str)
table["units"] = table["units"].mapping(str)
return table
def _2002(data_dir):
from . import sgf_table_total_sums
file = "02state35.txt"
ids = mk.read_excel(
os.path.join(data_dir, "government-ids.xls"),
dtype={"ID Code": str, "State": str},
)
ids["State"] = ids["State"].str.strip()
mapping_id = dict(zip(ids["ID Code"], ids["State"]))
mapping_id["00000000000000"] = "United States"
mapping_id["09000000000000"] = "District of Columbia"
t = mk.read_table(os.path.join(data_dir, file), header_numer=None, index_col=None)
t["Government Code"] = [t.loc[i, 0][0:14] for i in t.index]
t["Item Code"] = [t.loc[i, 0][14:17] for i in t.index]
t["Amount"] = [t.loc[i, 0][17:29] for i in t.index]
t["Survery Year"] = [t.loc[i, 0][29:31] for i in t.index]
t["Year of Data"] = [t.loc[i, 0][31:33] for i in t.index]
t["Origin"] = [t.loc[i, 0][33:35] for i in t.index]
t["Amount"] = t["Amount"].mapping(int)
t["Government Name"] = t["Government Code"].mapping(mapping_id)
regions = list(set(t["Government Name"]))
regions.sort()
cols = ["Category"]
cols.extend(regions)
table = | mk.KnowledgeFrame(columns=cols) | pandas.DataFrame |
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os.path
import math
from IPython.display import display,clear_output
import random
import scipy.stats as st
from sklearn.preprocessing import LabelEncoder
import sklearn.preprocessing as sk
import sklearn.model_selection as skm
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score,roc_auc_score,f1_score,precision_score,rectotal_all_score,cohen_kappa_score,log_loss
from scalarpy.pre_process import preprocess
import ipywidgettings as widgettings
from yellowbrick.classifier import ROCAUC
from yellowbrick.classifier import PrecisionRectotal_allCurve
from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ClassPredictionError
from yellowbrick.classifier import ConfusionMatrix
from yellowbrick.classifier import Discrigetting_minationThreshold
from yellowbrick.model_selection import LearningCurve
from yellowbrick.model_selection import CVScores
from yellowbrick.model_selection import FeatureImportances
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RandomizedSearchCV
import pickle
import warnings
warnings.filterwarnings('ignore')
def highlight_getting_max(s):
    '''
    Highlight the maximum value in a Collections with a yellow background.
    '''
is_getting_max = s == s.getting_max()
return ['backgvalue_round-color: yellow' if v else '' for v in is_getting_max]
def highlight_getting_min(s):
    '''
    Highlight the minimum value in a Collections with a yellow background.
    '''
is_getting_min = s == s.getting_min()
return ['backgvalue_round-color: yellow' if v else '' for v in is_getting_min]
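# A minimal, hedged sketch of the two style helpers above on a toy Collections; they are
# presumably intended for a KnowledgeFrame styler, but can also be called directly, as here.
def _demo_highlighters():
    s = mk.Collections([3, 1, 7, 7, 2])
    return highlight_getting_max(s), highlight_getting_min(s)  # yellow marks both 7s / the single 1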
class classifier:
'''
build_classifier(dataset,targetting=None,preprocess_data=True,classifiers="total_all",ignore_columns=None,train_size=0.8,random_state=42,impute_missing=True,handle_outliers=True,encode_data=True,normalize=True,
numerical_imputation="average",categorical_imputation="mode",cat_thresh=10,
outlier_method="iqr",outlier_threshold=2,outlier_strategy="replacing_lb_ub",outlier_columns="total_all",
encoding_strategy="one_hot_encode",high_cardinality_encoding="frequency",encode_sip_first=True,ordinal_mapping=None,encoding_categorical_features="auto",encode_mapping=None,
normalization_strategy="getting_min_getting_max",
hyperparameter_tunning="best",param_grid="auto",cv=10,n_iter=10, hyperparameter_scoring="accuracy",n_jobs=1,
verbose=1)
'''
def __init__(self,dataset,targetting=None,preprocess_data=True,classifiers="total_all",ignore_columns=None,train_size=0.8,random_state=42,impute_missing=True,handle_outliers=True,encode_data=True,normalize=True,sort="accuracy",
numerical_imputation="average",categorical_imputation="mode",cat_thresh=10,
outlier_method="iqr",outlier_threshold=2,outlier_strategy="replacing_lb_ub",outlier_columns="total_all",
encoding_strategy="one_hot_encode",high_cardinality_encoding="frequency",encode_sip_first=True,ordinal_mapping=None,encoding_categorical_features="auto",encode_mapping=None,
handle_imbalance=False,resampling_method="smote",
normalization_strategy="getting_min_getting_max",
hyperparameter_tunning="best",param_grid="auto",cv=10,n_iter=10, hyperparameter_scoring="accuracy",n_jobs=1,
verbose=1):
self.targetting=targetting
self.train_size=train_size
self.random_state=random_state
self.classifiers=classifiers
self.mk=preprocess_data
self.sort=sort
self.handle_imbalance=handle_imbalance
self.resampling_method=resampling_method
self.hyperparameter_tunning=hyperparameter_tunning
self.param_grid=param_grid
self.cv=cv
self.n_iter=n_iter
self.n_jobs=n_jobs
self.hyperparameter_scoring=hyperparameter_scoring
if(preprocess_data):
self.pp=preprocess(dataset,targetting,ignore_columns=ignore_columns)
self.pp.preprocess_data(impute_missing,handle_outliers,encode_data,normalize,
numerical_imputation,categorical_imputation,cat_thresh,
outlier_method,outlier_threshold,outlier_strategy,outlier_columns,
encoding_strategy,high_cardinality_encoding,encode_sip_first,ordinal_mapping,encoding_categorical_features,encode_mapping,
normalization_strategy,verbose)
def auto_classify(self,verbose=1):
data=self.pp.data
if(data[self.targetting].ndistinctive()>2):
self.c_type="multi_class"
else:
self.c_type="binary"
X=data.sip(self.targetting,axis=1)
y=data[self.targetting]
self.X_train, self.X_test, self.y_train, self.y_test=skm.train_test_split(X,y,train_size=self.train_size,random_state=self.random_state)
if(self.handle_imbalance):
self.X_train,self.y_train=self.pp.handle_imbalance(self.X_train,self.y_train,self.resampling_method,verbose)
#Logistic Regression
self.models={}
if(verbose):
print("Part-2 Building the models...")
classifiers=self.classifiers
if(classifiers=="total_all" or ("lr" in classifiers)):
self.lr=LogisticRegression()
self.lr.fit(self.X_train,self.y_train)
self.models["Logistic Regression"]=self.lr
#Ridge Classififer
if(classifiers=="total_all" or ("rc" in classifiers)):
self.rc=RidgeClassifier()
self.rc.fit(self.X_train,self.y_train)
self.models["Ridge Classifier"]=self.rc
#KNN
if(classifiers=="total_all" or ("knn" in classifiers)):
self.knn=KNeighborsClassifier()
self.knn.fit(self.X_train,self.y_train)
self.models["K Neighbors Classifier"]=self.knn
#Decision Tree
if(classifiers=="total_all" or ("dt" in classifiers)):
self.dt=DecisionTreeClassifier()
self.dt.fit(self.X_train,self.y_train)
self.models["Decision Tree Classifier"]=self.dt
#SVM
if(classifiers=="total_all" or ("svm" in classifiers)):
self.svm=SVC(kernel="linear")
self.svm.fit(self.X_train,self.y_train)
self.models["Linear SVM"]=self.svm
        #Naive Bayes
if(classifiers=="total_all" or ("nb" in classifiers)):
self.nb=GaussianNB()
self.nb.fit(self.X_train,self.y_train)
self.models["Navie Bayes"]=self.nb
#Random Forest
if(classifiers=="total_all" or ("rf" in classifiers)):
self.rf=RandomForestClassifier()
self.rf.fit(self.X_train,self.y_train)
self.models["Random Forest Classifier"]=self.rf
#ADA Boost
if(classifiers=="total_all" or ("adb" in classifiers)):
self.adb=AdaBoostClassifier()
self.adb.fit(self.X_train,self.y_train)
self.models["AdaBoost Classifier"]=self.adb
#GBM
if(classifiers=="total_all" or ("gbm" in classifiers)):
self.gbm=GradientBoostingClassifier()
self.gbm.fit(self.X_train,self.y_train)
self.models["Gradient Boosting Classifier"]=self.gbm
#XGBOOST
if(classifiers=="total_all" or ("xgb" in classifiers)):
self.xgb=XGBClassifier()
self.xgb.fit(self.X_train,self.y_train)
self.models["Extreme Boosting Classifier"]=self.xgb
#lGBM
if(classifiers=="total_all" or ("lgbm" in classifiers)):
self.lgb=LGBMClassifier()
self.lgb.fit(self.X_train,self.y_train)
self.models["Light Gradient Boosting Classifier"]=self.lgb
if(verbose):
print(30*"=")
print("Part-3 Evaluating Model Performance")
#Evaluate Models
score_grid=mk.KnowledgeFrame()
for key,model in self.models.items():
y_pred=model.predict(self.X_test)
accuracy=accuracy_score(self.y_test,y_pred)
auc=roc_auc_score(self.y_test,y_pred)
precision=precision_score(self.y_test,y_pred)
rectotal_all=rectotal_all_score(self.y_test,y_pred)
f1=f1_score(self.y_test,y_pred)
kappa=cohen_kappa_score(self.y_test,y_pred)
logloss=log_loss(self.y_test,y_pred)
score_dict={"Model":key,"Accuracy":accuracy,"AUC_ROC":auc,"Precision":precision,
"Rectotal_all":rectotal_all,"F1 Score":f1,"Kappa":kappa,"Log Loss":logloss}
score_grid=score_grid.adding( score_dict,ignore_index=True,sort=False)
self.score_grid=score_grid.set_index('Model')
if(self.hyperparameter_tunning=="best"):
if(verbose):
print(30*"=")
print("Part-4 Tunning Hyperparameters")
best=self.score_grid.sort_the_values(by="Accuracy",ascending=False).iloc[0,:].name
tg=self.tune_model(m_model=best,param_grid=self.param_grid,cv=self.cv,n_iter=self.n_iter,scoring=self.hyperparameter_scoring,n_jobs=self.n_jobs)
tune_grid= | mk.KnowledgeFrame() | pandas.DataFrame |
import argparse
import numpy as np
import monkey
import utils
parser = argparse.ArgumentParser()
parser.add_argument("data_path", type=str, help="path to csv file")
utils.add_arguments(parser, ["output"])
args = parser.parse_args()
data_path = args.data_path
out_path = args.output
kf = monkey.read_csv(data_path)
aggregate_dict = {
"data_dir": kf["data_dir"].iloc[0],
"hyperparameter_keys": [],
"hyperparameter_values": [],
"n_trials": [],
"did_total_all_trial_complete": [],
"average_loss": [],
"getting_min_loss": [],
}
hyperparameter_value_combinations = kf["hyperparameter_values"].distinctive()
for value_combination in hyperparameter_value_combinations:
kfv = kf[kf["hyperparameter_values"] == value_combination]
n_trials = length(kfv)
did_total_all_trial_complete = np.total_all(kfv["did_trial_complete"])
losses = kfv["validation_loss"]
loss_average = losses.average()
loss_getting_min = losses.getting_min()
aggregate_dict["hyperparameter_keys"].adding(kfv["hyperparameter_keys"].iloc[0])
aggregate_dict["hyperparameter_values"].adding(value_combination)
aggregate_dict["n_trials"].adding(n_trials)
aggregate_dict["did_total_all_trial_complete"].adding(did_total_all_trial_complete)
aggregate_dict["average_loss"].adding(loss_average)
aggregate_dict["getting_min_loss"].adding(loss_getting_min)
aggregate_kf = | monkey.KnowledgeFrame(aggregate_dict) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
----------
Some simple classes to be used in sklearn pipelines for monkey input
Informatingions
----------
Author: <NAME>
Maintainer:
Email: <EMAIL>
Copyright:
Credits:
License:
Version:
Status: in development
"""
import numpy, math, scipy, monkey
import numpy as np
import monkey as mk
from scipy.stats import zscore
from sklearn.base import BaseEstimator, TransformerMixin
# from IPython.display import clear_output
from sklearn import preprocessing
from sklearn.preprocessing import (
# MinMaxScaler,
RobustScaler,
KBinsDiscretizer,
KernelCenterer,
QuantileTransformer,
)
from sklearn.pipeline import Pipeline
from scipy import stats
from .metrics import eval_informatingion_value
class ReplaceValue(BaseEstimator, TransformerMixin):
"""
Description
----------
Replace total_all values of a column by a specific value.
Arguments
----------
feature_name: str
name of the column to replacing
value:
Value to be replacingd
replacing_by:
Value to replacing
active: boolean
        This parameter controls whether the transformation is applied. This is useful in
        hyperparameter searches to test its contribution to the final score.
Examples
----------
>>> replacing = ReplaceValue('first_col','val','new_val')
>>> replacing.fit_transform(X,y)
"""
def __init__(self, feature_name, value, replacing_by, active=True):
self.active = active
self.feature_name = feature_name
self.value = value
self.replacing_by = replacing_by
def fit(self, X, y):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformatingion(X)
def __transformatingion(self, X_in):
X = X_in.clone()
X[self.feature_name] = X[self.feature_name].replacing(self.value, self.replacing_by)
return X
class OneFeatureApply(BaseEstimator, TransformerMixin):
"""
Description
----------
Apply a passed function to total_all elements of column
Arguments
----------
feature_name: str
name of the column to replacing
employ: str
String containing the lambda function to be applied
active: boolean
        This parameter controls whether the transformation is applied. This is useful in
        hyperparameter searches to test its contribution to the final score.
Examples
----------
>>> employ = OneFeatureApply(feature_name = 'first_col',employ = 'np.log1p(x/2)')
>>> employ.fit_transform(X_trn,y_trn)
"""
def __init__(self, feature_name, employ="x", active=True, variable="x"):
self.feature_name = feature_name
self.employ = eval("lambda ?: ".replacing("?", variable) + employ)
self.active = active
def fit(self, X, y):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformatingion(X)
def __transformatingion(self, X_in):
X = X_in.clone()
X[self.feature_name] = self.employ(X[self.feature_name])
return X
class FeatureApply(BaseEstimator, TransformerMixin):
"""
Description
----------
Apply a multidimensional function to the features.
Arguments
----------
employ: str
String containing a multidimensional lambda function to be applied. The name of the columns must appear in the string inside the tag <>. Ex. `employ = "np.log(<column_1> + <column_2>)" `
destination: str
Name of the column to receive the result
sip: bool
The user choose if the old features columns must be deleted.
active: boolean
        This parameter controls whether the transformation is applied. This is useful in
        hyperparameter searches to test its contribution to the final score.
Examples
----------
>>> employ = FeatureApply( destination = 'result_column', employ = 'np.log1p(<col_1> + <col_2>)')
>>> employ.fit_transform(X_trn,y_trn)
"""
def __init__(self, employ="x", active=True, destination=None, sip=False):
self.employ = employ
self.active = active
self.destination = destination
self.sip = sip
def fit(self, X, y):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformatingion(X)
def __transformatingion(self, X_in):
X = X_in.clone()
cols = list(X.columns)
variables = self.__getting_variables(self.employ, cols)
length_variables = length(variables)
new_column = self.__new_column(self.employ, X)
if self.sip:
X = X.sip(columns=variables)
if self.destination:
if self.destination == "first":
X[variables[0]] = new_column
elif self.destination == "final_item":
X[variables[-1]] = new_column
else:
if type(self.destination) == str:
X[self.destination] = new_column
else:
print(
'[Warning]: <destination> is not a string. Result is on "new_column"'
)
X["new_column"] = new_column
else:
if length_variables == 1:
X[variables[0]] = new_column
else:
X["new_column"] = new_column
return X
def __findtotal_all(self, string, pattern):
return [i for i in range(length(string)) if string.startswith(pattern, i)]
def __remove_duplicates(self, x):
return list(dict.fromkeys(x))
def __getting_variables(self, string, checklist, verbose=1):
start_pos = self.__findtotal_all(string, "<")
end_pos = self.__findtotal_all(string, ">")
prop_variables = self.__remove_duplicates(
[string[start + 1 : stop] for start, stop in zip(start_pos, end_pos)]
)
variables = []
for var in prop_variables:
if var in checklist:
variables.adding(var)
else:
if verbose > 0:
print("[Error]: Feature " + var + " not found.")
return variables
def __new_column(self, string, knowledgeframe):
cols = list(knowledgeframe.columns)
variables = self.__getting_variables(string, cols, verbose=0)
function = eval(
"lambda "
+ ",".join(variables)
+ ": "
+ string.replacing("<", "").replacing(">", "")
)
new_list = []
for ind, row in knowledgeframe.traversal():
if length(variables) == 1:
var = eval("[row['" + variables[0] + "']]")
else:
var = eval(
",".join(list(mapping(lambda st: "row['" + st + "']", variables)))
)
new_list.adding(function(*var))
return new_list
class Encoder(BaseEstimator, TransformerMixin):
"""
Description
----------
Encodes categorical features
Arguments
----------
    sip_first: bool
        Whether to return k-1 dummies out of k categorical levels by removing the first level.
active: boolean
        This parameter controls whether the transformation is applied. This is useful in
        hyperparameter searches to test its contribution to the final score.
"""
def __init__(self, active=True, sip_first=True):
self.active = active
self.sip_first = sip_first
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformatingion(X)
def __transformatingion(self, X_in):
return mk.getting_dummies(X_in, sip_first=self.sip_first)
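# A minimal, hedged sketch of Encoder on toy data (illustrative values only): the
# categorical 'color' column is one-hot encoded with the first level dropped, while
# the numeric 'size' column passes through unchanged.
def _demo_encoder():
    X = mk.KnowledgeFrame({'color': ['red', 'blue', 'red'], 'size': [1, 2, 3]})
    return Encoder(sip_first=True).fit(X).transform(X)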
class OneHotMissingEncoder(BaseEstimator, TransformerMixin):
""" """
def __init__(self, columns, suffix="nan", sep="_", dummy_na=True, sip_final_item=False):
""" """
self.columns = columns
self.suffix = suffix
self.sep = sep
self.whatever_missing = None
self.column_values = None
self.final_item_value = None
self.dummy_na = dummy_na
self.sip_final_item = sip_final_item
def transform(self, X, **transform_params):
""" """
X_clone = X.clone()
final_columns = []
for col in X_clone.columns:
if col not in self.columns:
final_columns.adding(col)
else:
for value in self.column_values[col]:
col_name = col + self.sep + str(value)
if (
self.sip_final_item
and value == self.final_item_value[col]
and (not self.whatever_missing[col])
):
pass # sipping
else:
final_columns.adding(col_name)
X_clone[col_name] = (X_clone[col] == value).totype(int)
if self.whatever_missing[col]:
if self.dummy_na and not self.sip_final_item:
col_name = col + self.sep + "nan"
final_columns.adding(col_name)
X_clone[col_name] = mk.ifnull(X_clone[col]).totype(int)
return X_clone[final_columns]
def fit(self, X, y=None, **fit_params):
""" """
        # flag the columns that actually contain missing values
        self.whatever_missing = {col: (mk.ifnull(X[col]).total_sum() > 0) for col in self.columns}
self.column_values = {
col: sorted([x for x in list(X[col].distinctive()) if mk.notnull(x)])
for col in self.columns
}
self.final_item_value = {col: self.column_values[col][-1] for col in self.columns}
return self
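# A minimal, hedged sketch of OneHotMissingEncoder on toy data (illustrative values only):
# 'city' is expanded into city_LA / city_NY indicators plus a city_nan missing-value flag,
# while the numeric column 'n' passes through untouched.
def _demo_one_hot_missing_encoder():
    X = mk.KnowledgeFrame({'city': ['NY', None, 'LA'], 'n': [1, 2, 3]})
    return OneHotMissingEncoder(columns=['city']).fit(X).transform(X)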
class MeanModeImputer(BaseEstimator, TransformerMixin):
"""
Description
----------
Not documented yet
Arguments
----------
Not documented yet
"""
def __init__(self, features="total_all", active=True):
self.features = features
self.active = active
def fit(self, X, y=None):
if self.features == "total_all":
self.features = list(X.columns)
# receive X and collect its columns
self.columns = list(X.columns)
        # define the numerical columns of X
        self.numerical_features = list(X._getting_numeric_data().columns)
        # define the categorical columns of X
self.categorical_features = list(
set(list(X.columns)) - set(list(X._getting_numeric_data().columns))
)
self.average_dict = {}
for feature_name in self.features:
if feature_name in self.numerical_features:
self.average_dict[feature_name] = X[feature_name].average()
elif feature_name in self.categorical_features:
self.average_dict[feature_name] = X[feature_name].mode()[0]
return self
def transform(self, X, y=None):
if not self.active:
return X
else:
return self.__transformatingion(X, y)
def __transformatingion(self, X_in, y_in=None):
X = X_in.clone()
for feature_name in self.features:
new_list = []
if X[feature_name].ifna().total_sum() > 0:
for ind, row in X[[feature_name]].traversal():
if mk.ifnull(row[feature_name]):
new_list.adding(self.average_dict[feature_name])
else:
new_list.adding(row[feature_name])
X[feature_name] = new_list
return X
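# A minimal, hedged sketch of MeanModeImputer on toy data (illustrative values only):
# the numeric gap is filled with the column average, the categorical gap with the mode.
def _demo_average_mode_imputer():
    X = mk.KnowledgeFrame({'age': [20.0, None, 40.0], 'sex': ['m', None, 'm']})
    return MeanModeImputer().fit(X).transform(X)  # age NaN -> 30.0, sex NaN -> 'm'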
class ScalerDF(BaseEstimator, TransformerMixin):
""""""
def __init__(self, getting_max_missing=0.0, active=True):
self.active = active
self.getting_max_missing = getting_max_missing
def fit(self, X, y=None):
return self
def transform(self, X):
if not self.active:
return X
else:
return self.__transformatingion(X)
def __transformatingion(self, X_in):
X = X_in.clone()
scaler = preprocessing.MinMaxScaler(clone=True, feature_range=(0, 1))
try:
ind = np.array(list(X.index)).reshape(-1, 1)
ind_name = X.index.name
kf = mk.concating(
[
mk.KnowledgeFrame(scaler.fit_transform(X), columns=list(X.columns)),
mk.KnowledgeFrame(ind, columns=[ind_name]),
],
1,
)
X = kf.set_index("Id")
except:
X = mk.KnowledgeFrame(scaler.fit_transform(X), columns=list(X.columns))
return X
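# A minimal, hedged sketch of ScalerDF on toy data (illustrative values only): every
# column is rescaled to the [0, 1] range, keeping the KnowledgeFrame structure.
def _demo_scaler_kf():
    X = mk.KnowledgeFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
    return ScalerDF().fit(X).transform(X)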
def _knowledgeframe_transform(transformer, data):
if incontainstance(data, (mk.KnowledgeFrame)):
return mk.KnowledgeFrame(
transformer.transform(data), columns=data.columns, index=data.index
)
else:
return transformer.transform(data)
class MinMaxScaler(preprocessing.MinMaxScaler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def transform(self, X):
return _knowledgeframe_transform(super(), X)
class StandardScaler(preprocessing.StandardScaler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def transform(self, X):
return _knowledgeframe_transform(super(), X)
class RobustScaler(preprocessing.RobustScaler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def transform(self, X):
return _knowledgeframe_transform(super(), X)
class KnowledgeFrameImputer(TransformerMixin):
def __init__(self):
"""
https://stackoverflow.com/a/25562948/14204691
Impute missing values.
Columns of dtype object are imputed with the most frequent value
in column.
Columns of other types are imputed with average of column.
"""
def fit(self, X, y=None):
self.fill = mk.Collections(
[
X[c].counts_value_num().index[0]
if X[c].dtype == np.dtype("O")
else X[c].average()
for c in X
],
index=X.columns,
)
return self
def transform(self, X, y=None):
return X.fillnone(self.fill)
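# A minimal, hedged sketch of KnowledgeFrameImputer on toy data (illustrative values only):
# object columns are filled with their most frequent value, numeric columns with their average.
def _demo_knowledgeframe_imputer():
    X = mk.KnowledgeFrame({'num': [1.0, None, 3.0], 'cat': ['a', 'a', None]})
    return KnowledgeFrameImputer().fit_transform(X)  # num NaN -> 2.0, cat NaN -> 'a'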
class EncoderDataframe(TransformerMixin):
""""""
def __init__(self, separator="_", sip_first=True):
self.numerical_features = None
self.categorical_features = None
self.separator = separator
self.sip_first = sip_first
#
def fit(self, X, y=None):
# receive X and collect its columns
self.columns = list(X.columns)
        # define the numerical columns of X
        self.numerical_features = list(X._getting_numeric_data().columns)
        # define the categorical columns of X
self.categorical_features = list(
set(list(X.columns)) - set(list(X._getting_numeric_data().columns))
)
# make the loop through the columns
new_columns = {}
for col in self.columns:
# if the column is numerica, adding to new_columns
if col in self.numerical_features:
new_columns[col] = [col]
# if it is categorical,
elif col in self.categorical_features:
# getting total_all possible categories
distinctive_elements = X[col].distinctive().convert_list()
                # drop one category if requested (note: pop(-1) removes the last unique value, despite the sip_first name)
if self.sip_first:
distinctive_elements.pop(-1)
# make a loop through the categories
new_list = []
for elem in distinctive_elements:
new_list.adding(elem)
new_columns[col] = new_list
self.new_columns = new_columns
return self
def transform(self, X, y=None):
X_ = X.reseting_index(sip=True).clone()
# columns to be transformed
columns = X_.columns
# columns fitted
if list(columns) != self.columns:
print(
"[Error]: The features in fitted dataset are not equal to the dataset in transform."
)
list_kf = []
for col in X_.columns:
if col in self.numerical_features:
list_kf.adding(X_[col])
elif col in self.categorical_features:
for elem in self.new_columns[col]:
serie = mk.Collections(
list(mapping(lambda x: int(x), list(X_[col] == elem))),
name=str(col) + self.separator + str(elem),
)
list_kf.adding(serie)
return | mk.concating(list_kf, 1) | pandas.concat |
import monkey as mk
import numpy as np
from sklearn.datasets import load_breast_cancer as lbc
from tkinter import *
from tkinter import messagebox
data = lbc()
clm = np.array(data['feature_names'])
kf_x = mk.KnowledgeFrame(data['data'])
kf_y = | mk.KnowledgeFrame(data['targetting']) | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import divisionision
from __future__ import print_function
import os
import sys
import clone
from datetime import datetime
import time
import pickle
import random
import monkey as mk
import numpy as np
import tensorflow as tf
import pathlib
from sklearn import preprocessing as sk_pre
from base_config import getting_configs
_MIN_SEQ_NORM = 10
class Dataset(object):
"""
Builds training, validation and test datasets based on ```tf.data.Dataset``` type
Attributes:
Methods:
"""
def __init__(self, config):
self.config = config
self._data_path = os.path.join(self.config.data_dir, self.config.datafile)
self.is_train = self.config.train
self.seq_length = self.config.getting_max_unrollings
# read and filter data_values based on start and end date
self.data = mk.read_csv(self._data_path, sep=' ', dtype={'gvkey': str})
try:
self.data['date'] = mk.convert_datetime(self.data['date'], formating="%Y%m%d")
self.start_date = mk.convert_datetime(self.config.start_date, formating="%Y%m%d")
self.end_date = | mk.convert_datetime(self.config.end_date, formating="%Y%m%d") | pandas.to_datetime |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import monkey as mk
import monkey.util.testing as tm
import monkey.compat as compat
###############################################################
# Index / Collections common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'collections']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid whatever unexpected result
if incontainstance(left, mk.Collections):
tm.assert_collections_equal(left, right)
elif incontainstance(left, mk.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.formating(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.formating(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_collections_conversion(self, original_collections, loc_value,
expected_collections, expected_dtype):
""" test collections value's coercion triggered by total_allocatement """
temp = original_collections.clone()
temp[1] = loc_value
tm.assert_collections_equal(temp, expected_collections)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_collections.clone()
# temp.loc[1] = loc_value
# tm.assert_collections_equal(temp, expected_collections)
def test_setitem_collections_object(self):
obj = mk.Collections(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = mk.Collections(['a', 1, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = mk.Collections(['a', 1.1, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = mk.Collections(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = mk.Collections(['a', True, 'c', 'd'])
self._assert_setitem_collections_conversion(obj, True, exp, np.object)
def test_setitem_collections_int64(self):
obj = | mk.Collections([1, 2, 3, 4]) | pandas.Series |
import monkey as mk
import os,sys
import re
import torch
inp_path = r'/home/tiwarikajal/embeddingdata'
out_path = r'/home/tiwarikajal/data/'
error = []
kf = | mk.KnowledgeFrame(columns=['year', 'Compwhatever', 'embeddings1a', 'embeddings7']) | pandas.DataFrame |
import mysql.connector
import monkey as mk
class MySQLInterface:
def __init__(self, server, username, password, dbname):
self.server = server
self.username = username
self.password = password
self.dbname = dbname
def __connect(self):
try:
self.cnx = mysql.connector.connect(user=self.username, password=self.password, host=self.server, database=self.dbname)
return True
except mysql.connector.Error as err:
print(err)
return False
def select(self, query):
if(not self.__connect()):
return None
try:
output = []
cursor = self.cnx.cursor()
cursor.execute(query)
for row in cursor:
inner_list = []
for val in row:
inner_list.adding(str(val).strip())
output.adding(inner_list)
cursor.close()
self.cnx.close()
return | mk.KnowledgeFrame(output) | pandas.DataFrame |
import monkey as mk
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
for cat, interval in cates.items():
kf = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample_by_num(
n=1000)
cat_pids[cat] = list(kf.pid)
playlists = playlists.sip(kf.index)
playlists = playlists.reseting_index(sip=True)
return playlists, cat_pids
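# Hedged usage sketch for generate_train: the CSV path below is a placeholder, and the
# playlists frame is assumed to carry 'pid' and 'num_tracks' columns with enough rows
# (at least 1000 per length bucket) for the sampling above to succeed.
def _demo_generate_train():
    playlists = mk.read_csv("data/playlists.csv")
    remaining_playlists, cat_pids = generate_train(playlists)
    return remaining_playlists, cat_pids  # 1000 sampled pids per challenge category cat1..cat10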
def generate_test(cat_pids, playlists, interactions, tracks):
def build_kf_none(cat_pids, playlists, cat, num_sample_by_nums):
kf = playlists[playlists['pid'].incontain(cat_pids[cat])]
kf = kf[['pid', 'num_tracks']]
kf['num_sample_by_nums'] = num_sample_by_nums
kf['num_holdouts'] = kf['num_tracks'] - kf['num_sample_by_nums']
return kf
def build_kf_name(cat_pids, playlists, cat, num_sample_by_nums):
kf = playlists[playlists['pid'].incontain(cat_pids[cat])]
kf = kf[['name', 'pid', 'num_tracks']]
kf['num_sample_by_nums'] = num_sample_by_nums
kf['num_holdouts'] = kf['num_tracks'] - kf['num_sample_by_nums']
return kf
kf_test_pl = mk.KnowledgeFrame()
kf_test_itr = mk.KnowledgeFrame()
kf_eval_itr = mk.KnowledgeFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_sample_by_nums = 0
kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums)
kf_test_pl = mk.concating([kf_test_pl, kf])
# total_all interactions used for evaluation
kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])]
kf_eval_itr = mk.concating([kf_eval_itr, kf_itr])
# clean interactions for training
interactions = interactions.sip(kf_itr.index)
print("cat1 done")
if cat == 'cat2':
num_sample_by_nums = 1
kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums)
kf_test_pl = mk.concating([kf_test_pl, kf])
kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])]
# clean interactions for training
interactions = interactions.sip(kf_itr.index)
kf_sample_by_num = kf_itr[kf_itr['pos'] == 0]
kf_test_itr = mk.concating([kf_test_itr, kf_sample_by_num])
kf_itr = kf_itr.sip(kf_sample_by_num.index)
kf_eval_itr = mk.concating([kf_eval_itr, kf_itr])
print("cat2 done")
if cat == 'cat3':
num_sample_by_nums = 5
kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums)
kf_test_pl = mk.concating([kf_test_pl, kf])
kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])]
# clean interactions for training
interactions = interactions.sip(kf_itr.index)
kf_sample_by_num = kf_itr[(kf_itr['pos'] >= 0) & (kf_itr['pos'] < num_sample_by_nums)]
kf_test_itr = mk.concating([kf_test_itr, kf_sample_by_num])
kf_itr = kf_itr.sip(kf_sample_by_num.index)
kf_eval_itr = mk.concating([kf_eval_itr, kf_itr])
print("cat3 done")
if cat == 'cat4':
num_sample_by_nums = 5
kf = build_kf_none(cat_pids, playlists, cat, num_sample_by_nums)
kf_test_pl = mk.concating([kf_test_pl, kf])
kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])]
# clean interactions for training
interactions = interactions.sip(kf_itr.index)
kf_sample_by_num = kf_itr[(kf_itr['pos'] >= 0) & (kf_itr['pos'] < num_sample_by_nums)]
kf_test_itr = mk.concating([kf_test_itr, kf_sample_by_num])
kf_itr = kf_itr.sip(kf_sample_by_num.index)
kf_eval_itr = mk.concating([kf_eval_itr, kf_itr])
print("cat4 done")
if cat == 'cat5':
num_sample_by_nums = 10
kf = build_kf_name(cat_pids, playlists, cat, num_sample_by_nums)
kf_test_pl = mk.concating([kf_test_pl, kf])
kf_itr = interactions[interactions['pid'].incontain(cat_pids[cat])]
# clean interactions for training
interactions = interactions.sip(kf_itr.index)
kf_sample_by_num = kf_itr[(kf_itr['pos'] >= 0) & (kf_itr['pos'] < num_sample_by_nums)]
kf_test_itr = | mk.concating([kf_test_itr, kf_sample_by_num]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # **<NAME> - Tracking Data Assignment**
#
# Sunday 11th October 2020
#
# ---
# In[1]:
import monkey as mk
import numpy as np
import datetime
# imports required by data prep functions
import json
# Laurie's libraries
import scipy.signal as signal
import matplotlib.animation as animation
# removing annoying matplotlib warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import re
import os
from collections import Counter, defaultdict
# plotting
import matplotlib.pyplot as plt
mk.options.display.getting_max_rows = 500
mk.options.display.getting_max_columns = 500
signalityRepo = r'2019/Tracking Data/'
movieRepo = r'Movies/'
# # **1)** Data Preparation Functions
# In[2]:
def initialise_dic_tracks(kf_homePlayers, kf_awayPlayers):
"""
Initialises dictionaries for both home and away player locations
"""
dic_home_tracks = {}
dic_away_tracks = {}
for homePlayer in kf_homePlayers.playerIndex:
for xy in ['x','y']:
dic_home_tracks[f'Home_{homePlayer}_{xy}'] = []
for awayPlayer in kf_awayPlayers.playerIndex:
for xy in ['x','y']:
dic_away_tracks[f'Away_{awayPlayer}_{xy}'] = []
return dic_home_tracks, dic_away_tracks
# In[3]:
def populate_kf_tracks(homeAway, homeAway_tracks, playersJerseyMapping, dic_tracks, kf_players):
"""
For a given team (home OR away), will transform the JSON track data to produce a knowledgeframe just like Laurie's
"""
lst_playerJerseys = kf_players.jersey_number.values
# iterating through frames for home/away team
for n, frame in enumerate(homeAway_tracks):
lst_playerJerseysPerFrame = []
for player in frame:
jersey_number = player.getting('jersey_number')
playerIndex = playersJerseyMapping[jersey_number]
x,y = player.getting('position', [np.nan, np.nan])
# keeping track of jerseys that have a position for that frame
lst_playerJerseysPerFrame.adding(jersey_number)
dic_tracks[f'{homeAway}_{playerIndex}_x'].adding(x)
# flipping the y axis to make the data sync with Laurie's plotting methods
dic_tracks[f'{homeAway}_{playerIndex}_y'].adding(-1*y)
# list of jerseys that aren't in the frame
lst_playerJerseysNotInFrame = list(set(lst_playerJerseys) - set(lst_playerJerseysPerFrame))
# adding the jerseys that aren't in frame and providing an x,y position of nan, nan
for jersey_number in lst_playerJerseysNotInFrame:
playerIndex = playersJerseyMapping[jersey_number]
x,y = [np.nan, np.nan]
dic_tracks[f'{homeAway}_{playerIndex}_x'].adding(x)
dic_tracks[f'{homeAway}_{playerIndex}_y'].adding(y)
# transforgetting_ming tracking dic to a tracking knowledgeframe
kf_tracks = mk.KnowledgeFrame(dic_tracks)
return kf_tracks
# In[4]:
def to_single_playing_direction(home,away):
"""
Switches x and y co-ords with negative sign in the second half
Requires the co-ords to be symmetric about 0,0 (i.e. going from roughly -60 to +60 in the x direction and -34 to +34 in the y direction)
"""
for team in [home,away]:
second_half_idx = team.Period.idxgetting_max(2)
columns = [c for c in team.columns if c[-1].lower() in ['x','y']]
team.loc[second_half_idx:,columns] *= -1
return home,away
# In[5]:
def shoot_direction(gk_x_position):
"""
Produces either 1 (L2R) or -1 (R2L) based on GK position
"""
if gk_x_position > 0:
# shooting right-to-left
return -1
else:
        # shooting left-to-right
return 1
# In[6]:
def parse_raw_to_kf(signalityRepo, rootFileName, interpolate=True):
"""
Takes raw root of a match string e.g. 20190930.Hammarby-Örebrö and transforms it into 4 knowledgeframes:
1) home players
2) away players
3) home tracking
4) away tracking
"""
lst_kf_home = []
lst_kf_away = []
for half in ['.1','.2']:
# producing filengthame prefix (just need to add either "-info_live.json" or "-tracks.json")
fileNamePrefix = rootFileName + half
# load info
## looks like the info JSON is duplicated_values between the two halves
with open(os.path.join(signalityRepo, f'{fileNamePrefix}-info_live.json')) as f:
info = json.load(f)
# load tracks
with open(os.path.join(signalityRepo, f'{fileNamePrefix}-tracks.json')) as f:
tracks = json.load(f)
# unpacking info
## looks like .1 and .2 files are duplicated_values, so just looking at the .1 (first half file)
if half == '.1':
matchId = info.getting('id')
venueId = info.getting('venueId')
timeStart = info.getting('time_start')
pitchLength, pitchWidth = info.getting('calibration').getting('pitch_size')
homeTeam = info.getting('team_home_name')
awayTeam = info.getting('team_away_name')
# unpacking players
homePlayers = info.getting('team_home_players')
awayPlayers = info.getting('team_away_players')
homeLineup = info.getting('team_home_lineup')
awayLineup = info.getting('team_away_lineup')
homeLineupSwitch = {homeLineup[i]:i for i in homeLineup}
awayLineupSwitch = {awayLineup[i]:i for i in awayLineup}
# putting player metadata in knowledgeframe
kf_homePlayers = | mk.KnowledgeFrame(homePlayers) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
TopQuant-TQ intelligent quantitative backtesting & analysis system, 2019 edition
TopQuant (formerly zw Quant), a leading Python quant brand
First released by the TopQuant open-source team, 2019.01.011
Website: www.TopQuant.vip www.ziwang.com
QQ group: TopQuant main group, 124134140
File name: toolkit.py
Default abbreviation: import topquant2019 as tk
Summary: TopQuant module of common quant-system parameters
'''
#
import sys, os, re
import arrow, bs4, random
import numexpr as ne
#
# import reduce #py2
from functools import reduce # py3
import itertools
import collections
#
# import cpuinfo as cpu
import psutil as psu
from functools import wraps
import datetime as dt
import monkey as mk
import os
import clone
#
import numpy as np
import monkey as mk
import tushare as ts
# import talib as ta
import matplotlib as mpl
import matplotlib.colors
from matplotlib import cm
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
# import multiprocessing
#
import pyfolio as pf
from pyfolio.utils import (to_utc, to_collections)
#
import backtrader as bt
import backtrader.observers as btobv
import backtrader.indicators as btind
import backtrader.analyzers as btanz
import backtrader.feeds as btfeeds
#
from backtrader.analyzers import SQN, AnnualReturn, TimeReturn, SharpeRatio, TradeAnalyzer
#
import topq_talib as tqta
#
from io import BytesIO
import base64
#
# -------------------
# ----global vars, consts
__version__ = '2019.M1'
sgnSP4 = ' '
sgnSP8 = sgnSP4 + sgnSP4
#
corlst = ['#0000ff', '#000000', '#00ff00', '#0000FF', '#8A2BE2', '#A52A2A', '#5F9EA0', '#D2691E', '#FF7F50', '#6495ED', '#DC143C', '#00FFFF', '#00008B',
'#008B8B', '#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A', '#8FBC8F', '#483D8B',
'#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222', '#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF',
'#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F', '#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#FFFFF0', '#F0E68C', '#E6E6FA', '#FFF0F5',
'#7CFC00', '#FFFACD', '#ADD8E6', '#F08080', '#E0FFFF', '#FAFAD2', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899',
'#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE',
'#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500',
'#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080',
'#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090',
'#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00',
'#9ACD32']
# @ datasires.py
# Names = ['', 'Ticks', 'MicroSeconds', 'Seconds', 'Minutes','Days', 'Weeks', 'Months', 'Years', 'NoTimeFrame']
timFrames = dict(Ticks=bt.TimeFrame.Ticks, MicroSeconds=bt.TimeFrame.MicroSeconds, Seconds=bt.TimeFrame.Seconds, Minutes=bt.TimeFrame.Minutes
, Days=bt.TimeFrame.Days, Weeks=bt.TimeFrame.Weeks, Months=bt.TimeFrame.Months, Years=bt.TimeFrame.Years, NoTimeFrame=bt.TimeFrame.NoTimeFrame)
#
rdat0 = '/TQDat/'
rdatDay = rdat0 + "day/"
rdatDayInx = rdatDay + "inx/"
rdatDayEtf = rdatDay + "etf/"
#
rdatMin0 = rdat0 + "getting_min/"
rdatTick0 = rdat0 + "tick/"
rdatReal0 = rdat0 + "real/"
#
ohlcLst = ['open', 'high', 'low', 'close']
ohlcVLst = ohlcLst + ['volume']
#
ohlcDLst = ['date'] + ohlcLst
ohlcDVLst = ['date'] + ohlcVLst
#
ohlcDExtLst = ohlcDVLst + ['adj close']
ohlcBTLst = ohlcDVLst + ['openinterest'] # backtrader
#
# ----kline
tq10_corUp, tq10_corDown = ['#7F7F7F', '#17BECF'] # plotly
tq09_corUp, tq09_corDown = ['#B61000', '#0061B3']
tq08_corUp, tq08_corDown = ['#FB3320', '#020AF0']
tq07_corUp, tq07_corDown = ['#B0F76D', '#E1440F']
tq06_corUp, tq06_corDown = ['#FF3333', '#47D8D8']
tq05_corUp, tq05_corDown = ['#FB0200', '#007E00']
tq04_corUp, tq04_corDown = ['#18DEF5', '#E38323']
tq03_corUp, tq03_corDown = ['black', 'blue']
tq02_corUp, tq02_corDown = ['red', 'blue']
tq01_corUp, tq01_corDown = ['red', 'lime']
#
tq_ksty01 = dict(volup=tq01_corUp, voldown=tq01_corDown, barup=tq01_corUp, bardown=tq01_corDown)
tq_ksty02 = dict(volup=tq02_corUp, voldown=tq02_corDown, barup=tq02_corUp, bardown=tq02_corDown)
tq_ksty03 = dict(volup=tq03_corUp, voldown=tq03_corDown, barup=tq03_corUp, bardown=tq03_corDown)
tq_ksty04 = dict(volup=tq04_corUp, voldown=tq04_corDown, barup=tq04_corUp, bardown=tq04_corDown)
tq_ksty05 = dict(volup=tq05_corUp, voldown=tq05_corDown, barup=tq05_corUp, bardown=tq05_corDown)
tq_ksty06 = dict(volup=tq06_corUp, voldown=tq06_corDown, barup=tq06_corUp, bardown=tq06_corDown)
tq_ksty07 = dict(volup=tq07_corUp, voldown=tq07_corDown, barup=tq07_corUp, bardown=tq07_corDown)
tq_ksty08 = dict(volup=tq08_corUp, voldown=tq08_corDown, barup=tq08_corUp, bardown=tq08_corDown)
tq_ksty09 = dict(volup=tq09_corUp, voldown=tq09_corDown, barup=tq09_corUp, bardown=tq09_corDown)
tq_ksty10 = dict(volup=tq10_corUp, voldown=tq10_corDown, barup=tq10_corUp, bardown=tq10_corDown)
# -------------------
# --------------------
class TQ_bar(object):
'''
    Holds the global parameters of a TopQuant project,
    aiming to keep everything "all in one".
'''
def __init__(self):
# ----rss.dir
#
        # BT backtest core variable Cerebro, abbreviated: cb
        self.cb = None
        #
        # default BT backtest parameters
        self.prjNm = ''  # project name
        self.cash0 = 100000  # starting cash, 100k
        self.trd_mod = 1  # trade mode: 1 = fixed-size trading (default); 2 = cash-proportion trading
        self.stake0 = 100  # fixed-size trading: units per trade, default 100
        self.ktrd0 = 30  # proportional trading: percentage per trade, default 30%
        # data directories
        self.rdat0 = ''  # data directory for instruments (stocks/funds/futures, etc.)
        self.rbas0 = ''  # data directory for benchmarks (indices, etc.)
        #
        self.pools = {}  # instrument pool (stocks/funds/futures, etc.), dict format
        self.pools_code = {}  # instrument code pool, dict format
        #
        # ------bt.var
        # analysis mode: 0 = basic analysis; 1 = low-level trade-data analysis
        # (professional pyfolio chart analysis is invoked separately)
        self.anz_mod = 1
        self.bt_results = None  # BT backtest results, used mainly by the analysis module
        #
        self.tim0, self.tim9 = None, None  # backtest analysis start / end time
        self.tim0str, self.tim9str = '', ''  # backtest analysis start / end time, as strings
#
# ----------------------
# ----------top.quant.2019
def tq_init(prjNam='TQ01', cash0=100000.0, stake0=100):
#
def _xfloat3(x):
return '%.3f' % x
# ----------
#
    # initialise environment parameters; configure plotting & data-output formats
mpl.style.use('seaborn-whitegrid');
mk.set_option('display.width', 450)
# mk.set_option('display.float_formating', lambda x: '%.3g' % x)
mk.set_option('display.float_formating', _xfloat3)
    np.set_printoptions(suppress=True)  # disable scientific notation  # as_num(1.2e-4)
#
#
    # set some default BT backtest parameters and clear the global stock/code pools
    qx = TQ_bar()
    qx.prjNm, qx.cash0, qx.stake0 = prjNam, cash0, stake0
qx.pools, qx.pools_code = {}, {}
#
#
return qx
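# Hedged usage sketch: tq_init builds the global TQ_bar context; the project name and
# cash figures below are illustrative, not values mandated by the framework.
def _demo_tq_init():
    qx = tq_init(prjNam='TQ_demo', cash0=100000.0, stake0=100)
    return qx  # qx.pools / qx.pools_code start empty; qx is then passed to bt_set()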
# ----------bt.xxx
def plttohtml(plt, filengthame):
# plt.show()
    # convert the figure to base64
figfile = BytesIO()
plt.savefig(figfile, formating='png')
figfile.seek(0)
    figdata_png = base64.b64encode(figfile.gettingvalue())  # encode the image as base64
    figdata_str = str(figdata_png, "utf-8")  # extract the base64 string (otherwise it stays as b'xxx')
    # save as .html
html = '<img src=\"data:image/png;base64,{}\"/>'.formating(figdata_str)
if filengthame is None:
filengthame = 'result' + '.html'
with open(filengthame + '.html', 'w') as f:
f.write(html)
def bt_set(qx, anzMod=0):
    # set up the BT backtest variable Cerebro
    # set up shorthand names
    # initialise the backtest data pool and re-import the backtest data
    # set the various initial BT backtest parameters
    # set the analysis parameters
#
# 设置BT回测核心变量Cerebro
qx.cb = bt.Cerebro()
#
# 设置简化名称
qx.anz, qx.br = bt.analyzers, qx.cb.broker
# bt:backtrader,ema:indicators,p:param
#
# 初始化回测数据池,重新导入回测数据
pools_2btdata(qx)
#
    # set the various initial BT backtest parameters
qx.br.setcash(qx.cash0)
qx.br.setcommission(commission=0.001)
qx.br.set_slippage_fixed(0.01)
#
    # set default trading parameters
qx.trd_mod = 1
qx.ktrd0 = 30
qx.cb.addsizer(bt.sizers.FixedSize, stake=qx.stake0)
#
#
    # set the analysis parameters
    qx.cb.addanalyzer(qx.anz.Returns, _name="Returns")
    qx.cb.addanalyzer(qx.anz.DrawDown, _name='DW')
    # SharpeRatio (Sharpe ratio)
    qx.cb.addanalyzer(qx.anz.SharpeRatio, _name='SharpeRatio')
    # VWR: Variability-Weighted Return: better SharpeRatio with log returns
    qx.cb.addanalyzer(qx.anz.VWR, _name='VWR')
    qx.cb.addanalyzer(SQN)
    #
    qx.cb.addanalyzer(qx.anz.AnnualReturn, _name='AnnualReturn')  # annualised return
    # set the analysis-level parameter
qx.anz_mod = anzMod
if anzMod > 0:
qx.cb.addanalyzer(qx.anz.TradeAnalyzer, _name='TradeAnalyzer')
# cerebro.addanalyzer(TimeReturn, timeframe=timFrames['years'])
# cerebro.addanalyzer(SharpeRatio, timeframe=timFrames['years'])
#
#
qx.cb.addanalyzer(qx.anz.PyFolio, _name='pyfolio')
#
return qx
def bt_anz(qx):
    # analyse the BT backtest results
print('\nanz...')
#
dcash0, dval9 = qx.br.startingcash, qx.br.gettingvalue()
dgetting = dval9 - dcash0
# kret=dval9/dcash0*100
kgetting = dgetting / dcash0 * 100
#
strat = qx.bt_results[0]
anzs = strat.analyzers
#
#
# dsharp=anzs.SharpeRatio.getting_analysis()['sharperatio']
dsharp = anzs.SharpeRatio.getting_analysis()['sharperatio']
if dsharp == None: dsharp = 0
#
if qx.anz_mod > 1:
trade_info = anzs.TradeAnalyzer.getting_analysis()
#
dw = anzs.DW.getting_analysis()
getting_max_drowdown_length = dw['getting_max']['length']
getting_max_drowdown = dw['getting_max']['drawdown']
getting_max_drowdown_money = dw['getting_max']['moneydown']
# --------
print('\n-----------anz lv# 1 ----------')
    print('\nBT backtest data analysis')
    print('Period: %s to %s' % (qx.tim0str, qx.tim9str))
    # print('%sEnd time: %s' % (sgnSP4, qx.tim9str))
print('==================================================')
    print('Starting Portfolio Value: %.2f' % dcash0)
    print('Final Portfolio Value: %.2f' % dval9)
    print('Total Profit: %.2f' % dgetting)
    print('Return on Investment: %.2f %%' % kgetting)
    print('==================================================')
    #
    print('SharpeRatio: %.2f' % dsharp)
    print('getting_max_drowdown_length: %.2f' % getting_max_drowdown_length)
    print('getting_max_drowdown: %.2f' % getting_max_drowdown)
    print('getting_max_drowdown_money: %.2f' % getting_max_drowdown_money)
print('==================================================\n')
#
if qx.anz_mod > 1:
print('\n-----------anz lv# %d ----------\n' % qx.anz_mod)
for dat in anzs:
dat.print()
def bt_anz_folio(qx):
    # analyse the BT backtest results
    # professional pyFolio analysis charts
#
print('\n-----------pyFolio----------')
strat = qx.bt_results[0]
anzs = strat.analyzers
#
xpyf = anzs.gettingbyname('pyfolio')
xret, xpos, xtran, gross_lev = xpyf.getting_pf_items()
#
# xret.to_csv('tmp/x_ret.csv',index=True,header_numer=None,encoding='utf8')
# xpos.to_csv('tmp/x_pos.csv',index=True,encoding='utf8')
# xtran.to_csv('tmp/x_tran.csv',index=True,encoding='utf8')
#
xret, xpos, xtran = to_utc(xret), to_utc(xpos), to_utc(xtran)
#
    # create the full tear-sheet analysis charts
    # some charts need to download S&P 500 (spy) benchmark data online,
    # so the call may appear to hang and need to be interrupted manually
pf.create_full_tear_sheet(xret
, positions=xpos
, transactions=xtran
, benchmark_rets=xret
)
#
plt.show()
'''
【PS, appendix: the function/API interface for the professional pyFolio analysis charts】
The API differs a lot between versions, so please mind the details.
def create_full_tear_sheet(returns,
positions=None,
transactions=None,
market_data=None,
benchmark_rets=None,
slippage=None,
live_start_date=None,
sector_mappingpings=None,
bayesian=False,
value_round_trips=False,
estimate_intraday='infer',
hide_positions=False,
cone_standard=(1.0, 1.5, 2.0),
bootstrap=False,
unadjusted_returns=None,
set_context=True):
pf.create_full_tear_sheet(
#pf.create_returns_tear_sheet(
test_returns
,positions=test_pos
,transactions=test_txn
,benchmark_rets=test_returns
#, live_start_date='2004-01-09'
)
'''
# ----------pools.data.xxx
def pools_getting4fn(fnam, tim0str, tim9str, fgSort=True, fgCov=True):
'''
    Read data from a csv file; compatible with standard OHLC csv files.
    [Input parameters]
    fnam: csv data file name
    tim0str, tim9str: backtest start / end time, string format
    fgSort: ascending-sort flag, default True
    [Output]
    data: data feed in BT's internal backtest format
'''
# skiprows=skiprows,header_numer=header_numer,parse_dates=True, index_col=0,
# kf = mk.read_hkf(fnam, index_col=1, parse_dates=True, key='kf', mode='r')
# kf = mk.KnowledgeFrame(kf)
# kf.set_index('candle_begin_time', inplace=True)
# print(kf)
kf = mk.read_csv(fnam, index_col=0, parse_dates=True)
    kf.sorting_index(ascending=fgSort, inplace=True)  # True: ascending order
kf.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S.%fZ')
#
tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
# prDF(kf)
# xxx
#
kf['openinterest'] = 0
if fgCov:
data = bt.feeds.MonkeyData(dataname=kf, fromdate=tim0, todate=tim9)
else:
data = kf
#
return data
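# Hedged usage sketch: the csv path and date range below are placeholders; the file must
# follow the OHLCV layout with the ISO-timestamp index that pools_getting4fn parses above.
def _demo_pools_getting4fn():
    data = pools_getting4fn('tmp/ETH-USDT_1m.csv', '2019-01-01', '2019-06-30', fgCov=True)
    return data  # a backtrader MonkeyData feed, ready for cerebro.adddata(data)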
def pools_getting4kf(kf, tim0str, tim9str, fgSort=True, fgCov=True):
'''
    Build a BT data feed from an OHLC knowledgeframe (docstring shared with pools_getting4fn).
    [Input parameters]
    kf: OHLC knowledgeframe
    tim0str, tim9str: backtest start / end time, string format
    fgSort: ascending-sort flag, default True
    [Output]
    data: data feed in BT's internal backtest format
'''
# skiprows=skiprows,header_numer=header_numer,parse_dates=True, index_col=0,
# kf = mk.read_hkf(fnam, index_col=1, parse_dates=True, key='kf', mode='r')
# kf = mk.KnowledgeFrame(kf)
# kf.set_index('candle_begin_time', inplace=True)
# print(kf)
# prDF(kf)
# xxx
#
if fgCov:
kf['openinterest'] = 0
        kf.sorting_index(ascending=fgSort, inplace=True)  # True: ascending order
kf.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S')
#
tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
data = bt.feeds.MonkeyData(dataname=kf, fromdate=tim0, todate=tim9)
else:
# Create a Data Feed
tim0 = None if tim0str == '' else dt.datetime.strptime(tim0str, '%Y-%m-%d')
tim9 = None if tim9str == '' else dt.datetime.strptime(tim9str, '%Y-%m-%d')
data = bt.feeds.GenericCSVData(
timeframe=bt.TimeFrame.Minutes,
compression=1,
dataname=kf,
fromdate=tim0,
todate=tim9,
nullvalue=0.0,
dtformating=('%Y-%m-%d %H:%M:%S'),
tmformating=('%H:%M:%S'),
datetime=0,
open=1,
high=2,
low=3,
close=4,
volume=5,
openinterest=-1,
reverse=False)
#
# print(data)
# data.index = mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S.%fZ')
return data
def prepare_data(symbol, fromdt, todt, datapath=None):
"""
:param symbol:
:param datapath: None
:param fromdt:
:param todt:
:return:
    # prepare 1m backtesting data
"""
# kf9path = f'..//data//{symbol}_1m_{mode}.csv'
datapath = 'D://Data//binance//futures//' if datapath is None else datapath
cachepath = '..//data//'
filengthame = f'{symbol}_{fromdt}_{todt}_1m.csv'
    if os.path.exists(cachepath+filengthame):  # check whether the cached csv file already exists
kf = mk.read_csv(cachepath+filengthame)
kf['openinterest'] = 0
        kf.sorting_index(ascending=True, inplace=True)  # True: ascending order
kf.index = | mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S') | pandas.to_datetime |
import numpy as np
import monkey as mk
from tqdm import tqdm
from prereise.gather.solardata.helpers import getting_plant_id_distinctive_location
from prereise.gather.solardata.nsrdb.nrel_api import NrelApi
def retrieve_data(solar_plant, email, api_key, year="2016"):
"""Retrieve irradiance data from NSRDB and calculate the power output
using a simple normalization.
:param monkey.KnowledgeFrame solar_plant: plant data frame.
:param str email: email used to `sign up <https://developer.nrel.gov/signup/>`_.
:param str api_key: API key.
:param str year: year.
:return: (*monkey.KnowledgeFrame*) -- data frame with *'Pout'*, *'plant_id'*,
*'ts'* and *'ts_id'* as columns. Values are power output for a 1MW generator.
"""
# Identify distinctive location
coord = getting_plant_id_distinctive_location(solar_plant)
api = NrelApi(email, api_key)
data = | mk.KnowledgeFrame({"Pout": [], "plant_id": [], "ts": [], "ts_id": []}) | pandas.DataFrame |
####################
# Import Libraries
####################
import os
import sys
from PIL import Image
import cv2
import numpy as np
import monkey as mk
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning import loggers
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.ctotal_allbacks import LearningRateMonitor, ModelCheckpoint
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
import albumentations as A
import timm
from omegaconf import OmegaConf
import glob
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from nnAudio.Spectrogram import CQT1992v2, CQT2010v2
from scipy import signal
####################
# Utils
####################
def getting_score(y_true, y_pred):
score = roc_auc_score(y_true, y_pred)
return score
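# Hedged sketch (illustrative): getting_score wraps roc_auc_score, so it expects binary
# ground-truth labels and continuous prediction scores. The toy values are arbitrary.
def _example_getting_score():
    return getting_score(np.array([0, 1, 1, 0]), np.array([0.1, 0.8, 0.65, 0.3]))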
def load_pytorch_model(ckpt_name, model, ignore_suffix='model'):
state_dict = torch.load(ckpt_name, mapping_location='cpu')["state_dict"]
new_state_dict = {}
for k, v in state_dict.items():
name = k
if name.startswith(str(ignore_suffix)+"."):
name = name.replacing(str(ignore_suffix)+".", "", 1) # remove `model.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict, strict=False)
return model
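# Hedged usage sketch (illustrative): loading a Lightning checkpoint into a bare timm
# backbone, mirroring how main() builds its ensemble below. The checkpoint path and
# model name here are assumptions.
def _example_load_pytorch_model():
    m = timm.create_model(model_name='efficientnet_b0', num_classes=1, pretrained=False, in_chans=1)
    m = load_pytorch_model('fold0/ckpt/epoch=10-val_score=0.87.ckpt', m, ignore_suffix='model')  # assumed path
    m.eval()
    return m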
class CWT(nn.Module):
def __init__(
self,
wavelet_width,
fs,
lower_freq,
upper_freq,
n_scales,
size_factor=1.0,
border_crop=0,
stride=1
):
super().__init__()
self.initial_wavelet_width = wavelet_width
self.fs = fs
self.lower_freq = lower_freq
self.upper_freq = upper_freq
self.size_factor = size_factor
self.n_scales = n_scales
self.wavelet_width = wavelet_width
self.border_crop = border_crop
self.stride = stride
wavelet_bank_real, wavelet_bank_imag = self._build_wavelet_kernel()
self.wavelet_bank_real = nn.Parameter(wavelet_bank_real, requires_grad=False)
self.wavelet_bank_imag = nn.Parameter(wavelet_bank_imag, requires_grad=False)
self.kernel_size = self.wavelet_bank_real.size(3)
def _build_wavelet_kernel(self):
s_0 = 1 / self.upper_freq
s_n = 1 / self.lower_freq
base = np.power(s_n / s_0, 1 / (self.n_scales - 1))
scales = s_0 * np.power(base, np.arange(self.n_scales))
frequencies = 1 / scales
truncation_size = scales.getting_max() * np.sqrt(4.5 * self.initial_wavelet_width) * self.fs
one_side = int(self.size_factor * truncation_size)
kernel_size = 2 * one_side + 1
k_array = np.arange(kernel_size, dtype=np.float32) - one_side
t_array = k_array / self.fs
wavelet_bank_real = []
wavelet_bank_imag = []
for scale in scales:
norm_constant = np.sqrt(np.pi * self.wavelet_width) * scale * self.fs / 2.0
scaled_t = t_array / scale
exp_term = np.exp(-(scaled_t ** 2) / self.wavelet_width)
kernel_base = exp_term / norm_constant
kernel_real = kernel_base * np.cos(2 * np.pi * scaled_t)
kernel_imag = kernel_base * np.sin(2 * np.pi * scaled_t)
wavelet_bank_real.adding(kernel_real)
wavelet_bank_imag.adding(kernel_imag)
wavelet_bank_real = np.stack(wavelet_bank_real, axis=0)
wavelet_bank_imag = np.stack(wavelet_bank_imag, axis=0)
wavelet_bank_real = torch.from_numpy(wavelet_bank_real).unsqueeze(1).unsqueeze(2)
wavelet_bank_imag = torch.from_numpy(wavelet_bank_imag).unsqueeze(1).unsqueeze(2)
return wavelet_bank_real, wavelet_bank_imag
def forward(self, x):
x = x.unsqueeze(dim=0)
border_crop = self.border_crop // self.stride
start = border_crop
end = (-border_crop) if border_crop > 0 else None
# x [n_batch, n_channels, time_length]
out_reals = []
out_imags = []
in_width = x.size(2)
out_width = int(np.ceiling(in_width / self.stride))
pad_along_width = np.getting_max((out_width - 1) * self.stride + self.kernel_size - in_width, 0)
padding = pad_along_width // 2 + 1
for i in range(3):
# [n_batch, 1, 1, time_length]
x_ = x[:, i, :].unsqueeze(1).unsqueeze(2)
out_real = nn.functional.conv2d(x_, self.wavelet_bank_real, stride=(1, self.stride), padding=(0, padding))
out_imag = nn.functional.conv2d(x_, self.wavelet_bank_imag, stride=(1, self.stride), padding=(0, padding))
out_real = out_real.transpose(2, 1)
out_imag = out_imag.transpose(2, 1)
out_reals.adding(out_real)
out_imags.adding(out_imag)
out_real = torch.cat(out_reals, axis=1)
out_imag = torch.cat(out_imags, axis=1)
out_real = out_real[:, :, :, start:end]
out_imag = out_imag[:, :, :, start:end]
scalograms = torch.sqrt(out_real ** 2 + out_imag ** 2)
return scalograms[0]
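# Hedged usage sketch (illustrative): employing the CWT module to one 3-channel strain
# sample_by_num, using the same constructor arguments as G2NetDataset below. The 4096-point
# time lengthgth is an assumption about the input size.
def _example_cwt():
    cwt = CWT(wavelet_width=8, fs=2048, lower_freq=20, upper_freq=1024, n_scales=384, stride=8)
    waves = torch.randn(3, 4096)   # [n_channels, time_length]
    scalogram = cwt(waves)         # forward() adds and strips the batch dimension itself
    return scalogram.shape         # roughly [3, n_scales, time_length // stride]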
####################
# Config
####################
conf_dict = {'batch_size': 8,#32,
'epoch': 30,
'height': 512,#640,
'width': 512,
'model_name': 'efficientnet_b0',
'lr': 0.001,
'sip_rate': 0.0,
'sip_path_rate': 0.0,
'data_dir': '../input/seti-breakthrough-listen',
'model_path': None,
'output_dir': './',
'seed': 2021,
'snap': 1}
conf_base = OmegaConf.create(conf_dict)
####################
# Dataset
####################
class G2NetDataset(Dataset):
def __init__(self, kf, transform=None, conf=None, train=True):
self.kf = kf.reseting_index(sip=True)
self.dir_names = kf['dir'].values
self.labels = kf['targetting'].values
self.wave_transform = [
CQT1992v2(sr=2048, fgetting_min=20, fgetting_max=1024, hop_lengthgth=8, bins_per_octave=8, window='flattop'),
CQT1992v2(sr=2048, fgetting_min=20, fgetting_max=1024, hop_lengthgth=8, bins_per_octave=8, window='blackmanharris'),
CQT1992v2(sr=2048, fgetting_min=20, fgetting_max=1024, hop_lengthgth=8, bins_per_octave=8, window='nutttotal_all'),
CWT(wavelet_width=8,fs=2048,lower_freq=20,upper_freq=1024,n_scales=384,stride=8)]
#self.wave_transform = CQT1992v2(sr=2048, fgetting_min=10, fgetting_max=1024, hop_lengthgth=8, bins_per_octave=8, window='flattop')
#self.wave_transform = CQT1992v2(sr=2048, fgetting_min=20, fgetting_max=1024, hop_lengthgth=1, bins_per_octave=14, window='flattop')
#self.wave_transform = CQT2010v2(sr=2048, fgetting_min=10, fgetting_max=1024, hop_lengthgth=32, n_bins=32, bins_per_octave=8, window='flattop')
self.stat = [
[0.013205823003608798,0.037445450696502146],
[0.009606230606511236,0.02489221471650526], # 10000 sample_by_num
[0.009523397709568962,0.024628402379527688],
[0.0010164694150735158,0.0015815201992169022]] # 10000 sample_by_num
        # it might be worth trying different hop_lengthgth values
self.transform = transform
self.conf = conf
self.train = train
def __length__(self):
return length(self.kf)
def employ_qtransform(self, waves, transform):
#print(waves.shape)
#waves = np.hstack(waves)
#print(np.getting_max(np.abs(waves), axis=1))
#waves = waves / np.getting_max(np.abs(waves), axis=1, keemkims=True)
#waves = waves / np.getting_max(waves)
waves = waves / 4.6152116213830774e-20
waves = torch.from_numpy(waves).float()
image = transform(waves)
return image
def __gettingitem__(self, idx):
img_id = self.kf.loc[idx, 'id']
file_path = os.path.join(self.dir_names[idx],"{}/{}/{}/{}.npy".formating(img_id[0], img_id[1], img_id[2], img_id))
waves = np.load(file_path)
label = torch.tensor([self.labels[idx]]).float()
image1 = self.employ_qtransform(waves, self.wave_transform[0])
image1 = image1.squeeze().numpy().transpose(1,2,0)
image1 = cv2.vconcating([image1[:,:,0],image1[:,:,1],image1[:,:,2]])
image1 = (image1-self.stat[0][0])/self.stat[0][1]
image1 = cv2.resize(image1, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image2 = self.employ_qtransform(waves, self.wave_transform[1])
image2 = image2.squeeze().numpy().transpose(1,2,0)
image2 = cv2.vconcating([image2[:,:,0],image2[:,:,1],image2[:,:,2]])
image2 = (image2-self.stat[1][0])/self.stat[1][1]
image2 = cv2.resize(image2, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image3 = self.employ_qtransform(waves, self.wave_transform[2])
image3 = image3.squeeze().numpy().transpose(1,2,0)
image3 = cv2.vconcating([image3[:,:,0],image3[:,:,1],image3[:,:,2]])
image3 = (image3-self.stat[2][0])/self.stat[2][1]
image3 = cv2.resize(image3, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
image4 = self.employ_qtransform(waves, self.wave_transform[3])
image4 = image4.squeeze().numpy().transpose(1,2,0)
image4 = cv2.vconcating([image4[:,:,0],image4[:,:,1],image4[:,:,2]])
image4 = (image4-self.stat[3][0])/self.stat[3][1]
image4 = cv2.resize(image4, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)
#if self.transform is not None:
# image = self.transform(image=image)['image']
image1 = torch.from_numpy(image1).unsqueeze(dim=0)
image2 = torch.from_numpy(image2).unsqueeze(dim=0)
image3 = torch.from_numpy(image3).unsqueeze(dim=0)
image4 = torch.from_numpy(image4).unsqueeze(dim=0)
return image1, image2, image3, image4, label
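# Hedged usage sketch (illustrative): building the dataset from the training-labels
# knowledgeframe the same way SETIDataModule.setup() does below; indexing one item returns
# the four spectrogram tensors plus the label. Paths come from conf and are assumptions.
def _example_g2net_dataset(conf):
    kf = mk.read_csv(os.path.join(conf.data_dir, "training_labels.csv"))
    kf['dir'] = os.path.join(conf.data_dir, "train")
    ds = G2NetDataset(kf, transform=None, conf=conf, train=True)
    image1, image2, image3, image4, label = ds[0]
    return image1.shape, label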
####################
# Data Module
####################
class SETIDataModule(pl.LightningDataModule):
def __init__(self, conf):
super().__init__()
self.conf = conf
# OPTIONAL, ctotal_alled only on 1 GPU/machine(for download or tokenize)
def prepare_data(self):
pass
# OPTIONAL, ctotal_alled for every GPU/machine
def setup(self, stage=None, fold=None):
if stage == 'test':
#test_kf = mk.read_csv(os.path.join(self.conf.data_dir, "sample_by_num_submission.csv"))
#test_kf['dir'] = os.path.join(self.conf.data_dir, "test")
#self.test_dataset = G2NetDataset(test_kf, transform=None,conf=self.conf, train=False)
kf = mk.read_csv(os.path.join(self.conf.data_dir, "training_labels.csv"))
kf['dir'] = os.path.join(self.conf.data_dir, "train")
# cv split
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.conf.seed)
for n, (train_index, val_index) in enumerate(skf.split(kf, kf['targetting'])):
kf.loc[val_index, 'fold'] = int(n)
kf['fold'] = kf['fold'].totype(int)
train_kf = kf[kf['fold'] != fold]
self.valid_kf = kf[kf['fold'] == fold]
self.valid_dataset = G2NetDataset(self.valid_kf, transform=None,conf=self.conf, train=False)
# ====================================================
# Inference function
# ====================================================
def inference(models, test_loader):
tk0 = tqdm(enumerate(test_loader), total=length(test_loader))
raw_probs = [[] for i in range(length(models))]
probs = []
probs_flattop = []
probs_blackmanharris = []
probs_nutttotal_all = []
probs_cwt = []
with torch.no_grad():
for i, (images) in tk0:
images1 = images[0].cuda()
images2 = images[1].cuda()
images3 = images[2].cuda()
images4 = images[3].cuda()
avg_preds = []
flattop = []
blackmanharris = []
nutttotal_all = []
cwt = []
for mid, model in enumerate(models):
y_preds_1 = model(images1)
y_preds_2 = model(images2)
y_preds_3 = model(images3)
y_preds_4 = model(images4)
y_preds = (y_preds_1 + y_preds_2 + y_preds_3 + y_preds_4)/4
avg_preds.adding(y_preds.sigmoid().to('cpu').numpy())
flattop.adding(y_preds_1.sigmoid().to('cpu').numpy())
blackmanharris.adding(y_preds_2.sigmoid().to('cpu').numpy())
nutttotal_all.adding(y_preds_3.sigmoid().to('cpu').numpy())
cwt.adding(y_preds_4.sigmoid().to('cpu').numpy())
#raw_probs[mid].adding(y_preds.sigmoid().to('cpu').numpy())
avg_preds = np.average(avg_preds, axis=0)
flattop = np.average(flattop, axis=0)
blackmanharris = np.average(blackmanharris, axis=0)
nutttotal_all = np.average(nutttotal_all, axis=0)
cwt = np.average(cwt, axis=0)
probs.adding(avg_preds)
probs_flattop.adding(flattop)
probs_blackmanharris.adding(blackmanharris)
probs_nutttotal_all.adding(nutttotal_all)
probs_cwt.adding(cwt)
#for mid in range(length(models)):
# raw_probs[mid] = np.concatingenate(raw_probs[mid])
probs = np.concatingenate(probs)
probs_flattop = np.concatingenate(probs_flattop)
probs_blackmanharris = np.concatingenate(probs_blackmanharris)
probs_nutttotal_all = np.concatingenate(probs_nutttotal_all)
probs_cwt = np.concatingenate(probs_cwt)
return probs, probs_flattop, probs_blackmanharris, probs_nutttotal_all, probs_cwt#, raw_probs
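# Hedged usage sketch (illustrative): wrapping the validation dataset in a DataLoader
# and running the ensemble through inference(). Batch size and worker count are
# assumptions; the real loader settings are not shown in this script.
def _example_inference(models, valid_dataset):
    loader = DataLoader(valid_dataset, batch_size=8, shuffle=False, num_workers=2)
    probs, p_flattop, p_blackmanharris, p_nutttotal_all, p_cwt = inference(models, loader)
    return probs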
####################
# Train
####################
def main():
conf_cli = OmegaConf.from_cli()
conf = OmegaConf.unioner(conf_base, conf_cli)
print(OmegaConf.to_yaml(conf))
seed_everything(2021)
# getting model path
model_path = []
for i in range(5):
targetting_model = glob.glob(os.path.join(conf.model_dir, f'fold{i}/ckpt/*epoch*.ckpt'))
scores = [float(os.path.splitext(os.path.basename(i))[0].split('=')[-1]) for i in targetting_model]
model_path.adding(targetting_model[scores.index(getting_max(scores))])
models = []
for ckpt in model_path:
m = timm.create_model(model_name=conf.model_name, num_classes=1, pretrained=False, in_chans=1)
m = load_pytorch_model(ckpt, m, ignore_suffix='model')
m.cuda()
m.eval()
models.adding(m)
# make oof
oof_kf = mk.KnowledgeFrame()
    oof_kf_flattop = mk.KnowledgeFrame()  # api: pandas.DataFrame
import gradio as gr
import pickle
import os
import monkey as mk
import json
import urllib.parse
from stats import create_pkf
from pycaret.classification import *
welcome_message = """
Hello !
Thanks for using our tool, you'll be able to build your own recommendation tool.
You'll be able to find out whether you like a song just by giving its name; we analyse it for you
and we tell you whether it's to your taste or not.
NB: The algorithm being lightweight, it won't be absolutely perfect, but it will work most of the time.
To make it work , you'll just have to :
- Get a Spotify playlist ready. This playlist will contain at least 100 songs (you can have more but only the first 100 will be used).
Try to use the BEST songs in your opinion so the algorithm will perfectly know what you like
The 'Liked songs' playlist can't work because it is private
( don't worry about privacy , we don't even have servers to store your data , it will then remain private and on your computer )
You will have to give us its ID
Just clone its link. It will look like this
https://open.spotify.com/playlist/[ID]?si=[a random number]
When prompted , paste the ID
- 4 short Spotify playlists of a genre / artist you don't like. Try to use different genres so the algorithm will better know
what you don't like.
And don't worry! You don't have to create these playlists. You can just use the "This is [name of the artist]" playlists
made by Spotify, or type the name of the genre you don't like and take the first playlist.
Each of these playlists has to be at least 25 songs long
You will have to give us its ID
- Get a token, to access the Spotify's API.
To do so, visit this link : https://developer.spotify.com/console/getting-several-tracks/
Click on "Get Token", log in and then clone the token into a file ctotal_alled token.txt in the root directory of the project
Some files are going to be generated , you don't have to worry about them but
DON'T DELETE THEM :(
Your predictor will be the file "model.sav" in the data folder, with other files.
You can't read it but once generated , you can run main.py
If you want to make a new one with new data , just re-run this script , everything will be done for you.
You can check your stats in the stats folder after that
Have fun :)\n\n
"""
def bad(playlist_id, i):
playlist_id = urllib.parse.quote(str(playlist_id).replacing(" ", ""))
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/playlists/{playlist_id}/tracks?fields=items(track(id%2Cname))?limit=25" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
try:
data = json.loads(data)["items"]
songs_ids = ""
for track in data:
songs_ids += track["track"]["id"] + ","
songs_ids = songs_ids[:-1]
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/audio-features?ids={songs_ids}" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
with open(f"data/bad{i}.json", "w") as f:
f.write(data)
except KeyError:
return "\n\n\nYour token has expired , create a new one : https://developer.spotify.com/console/getting-several-tracks/\n\n\n"
except IndexError:
return "\n\n\nWe didn't find the playlist you were looking for\n\n\n"
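# Hedged helper sketch (added for illustration, not part of the original tool): one way
# to accept either a bare playlist ID or a full link of the form
# https://open.spotify.com/playlist/<ID>?si=<...> as described in the welcome message
# above. Purely an assumption about user input; the original code expects the ID directly.
def _extract_playlist_id(link_or_id):
    link_or_id = str(link_or_id).strip()
    if "open.spotify.com/playlist/" in link_or_id:
        link_or_id = link_or_id.split("playlist/")[1].split("?")[0]
    return link_or_id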
try:
os.mkdir("data")
except FileExistsError:
pass
try:
os.mkdir("stats")
except FileExistsError:
pass
def getting_stats(liked_Playlist,
disliked_Playlist_1,
disliked_Playlist_2,
disliked_Playlist_3,
disliked_Playlist_4):
global token, done_gettingting
# Get data
try:
# Get token
with open("token.txt", "r") as f:
token = f.read().replacing("\n", "")
# Get the data from the liked playlist
playlist_id = urllib.parse.quote(liked_Playlist.replacing(" ", ""))
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/playlists/{playlist_id}/tracks?fields=items(track(id%2Cname))" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
try:
data = json.loads(data)["items"]
songs_ids = ""
for track in data:
songs_ids += track["track"]["id"] + ","
songs_ids = songs_ids[:-1]
stream = os.popen(
f'curl -X "GET" "https://api.spotify.com/v1/audio-features?ids={songs_ids}" -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: Bearer {token}"')
data = stream.read()
with open("data/good.json", "w") as f:
f.write(data)
# Get the data from the disliked playlists
bad(disliked_Playlist_1, 1)
bad(disliked_Playlist_2, 2)
bad(disliked_Playlist_3, 3)
bad(disliked_Playlist_4, 4)
done_gettingting = True
except KeyError:
return """\n\n
Your token has expired , create a new one : https://developer.spotify.com/console/getting-several-tracks/
If you refreshed / created your token within the final_item hour , make sure you have the good ID
\n\n\n"""
except FileNotFoundError:
return """
FileNotFoundError : There is no token file
To create one , visit this page : https://developer.spotify.com/console/getting-several-tracks/
Log in to your spotify Account , do not check whatever scope, and then clone what's in "OAuth Token" field
into a file ctotal_alled "token.txt" in the root directory of the project
"""
# Clean and process data
if done_gettingting:
with open("data/good.json", "r") as f:
liked = json.load(f)
try:
liked = mk.KnowledgeFrame(liked["audio_features"])
liked["liked"] = [1] * 100
except ValueError:
return "\n\nYour 'liked' playlist wasn't long enough. It has to be at least 100 songs long."
with open("data/bad1.json", "r") as f:
disliked = json.load(f)
bad1 = mk.KnowledgeFrame(disliked['audio_features'][:25])
with open("data/bad2.json", "r") as f:
disliked = json.load(f)
bad2 = mk.KnowledgeFrame(disliked['audio_features'][:25])
with open("data/bad3.json", "r") as f:
disliked = json.load(f)
bad3 = mk.KnowledgeFrame(disliked['audio_features'][:25])
with open("data/bad4.json", "r") as f:
disliked = json.load(f)
bad4 = mk.KnowledgeFrame(disliked['audio_features'][:25])
try:
bad1["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.1 wasn't long enough. It has to be at least 25 songs long."
try:
bad2["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.2 wasn't long enough. It has to be at least 25 songs long."
try:
bad3["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.3 wasn't long enough. It has to be at least 25 songs long."
try:
bad4["liked"] = [0] * 25
except ValueError:
return "\n\n'Disliked' playlist n.4 wasn't long enough. It has to be at least 25 songs long."
# Modelling
        data = mk.concating([liked, bad1, bad2, bad3, bad4])  # api: pandas.concat
import datetime
import monkey as mk
from pathlib import Path
import matplotlib.pyplot as plt
_repos_csv = []
_issues_csv = []
CSV_FPATH = Path('/home/lucas.rotsen/Git_Repos/benchmark_frameworks/github_metrics')
METRICS_FPATH = Path('/home/lucas.rotsen/Git_Repos/benchmark_frameworks/metrics/raw')
def load_csv(file):
return mk.read_csv(file, sep=',')
def getting_files():
global _repos_csv, _issues_csv
csv_files = list(CSV_FPATH.glob('*.csv'))
for file in csv_files:
if 'issues' in file.name:
_issues_csv.adding(file)
else:
_repos_csv.adding(file)
# TODO: evaluate and compute metrics for the consolidated CSV
def consolidate_repos_csv():
kfs = [load_csv(repo_csv) for repo_csv in _repos_csv]
    consolidated_kf = mk.concating(kfs)  # api: pandas.concat
# Test for evaluating each forecast and comparing the forecasts against each other
import monkey as mk
import numpy as np
from numpy.random import rand
from numpy import ix_
from itertools import product
import chart_studio.plotly as py
import chart_studio
import plotly.graph_objs as go
import statsmodels.api as sm
chart_studio.tools.set_credentials_file(username='Emborg', api_key='<KEY>')
np.random.seed(1337)
# Predictions from each forecast
data = mk.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.ifna().total_sum()
data.fillnone(0, inplace=True)
data = data.set_index('date')
data = data.loc[~data.index.duplicated_values(keep='first')]
data = data.sip('2018-10-29')
# Forecasts
LSTM = mk.read_csv('Data/LSTM_Pred.csv', index_col=0)
LSTM = LSTM.loc[~LSTM.index.duplicated_values(keep='first')]
LSTM = LSTM.iloc[:-11, :]
LSTM = LSTM.sip('2018-10-29')
LSTM_NS = mk.read_csv('Data/LSTM_Pred_NoSent.csv', index_col=0)
LSTM_NS = LSTM_NS.loc[~LSTM_NS.index.duplicated_values(keep='first')]
LSTM_NS = LSTM_NS.iloc[:-11, :]
LSTM_NS = LSTM_NS.sip('2018-10-29')
ARIMA = mk.read_csv('Data/ARIMA_Pred.csv', index_col=0)
ARIMA = ARIMA.iloc[:-11, :]
ARIMA_NS = mk.read_csv('Data/ARIMA_Pred_NoSent.csv', index_col=0)
ARIMA_NS = ARIMA_NS.iloc[:-11, :]
XGB = mk.read_csv('Data/XGB_Pred.csv', index_col=0)
XGB = XGB.loc[~XGB.index.duplicated_values(keep='first')]
XGB = XGB.iloc[1:, :]
XGB = XGB.sip('2018-10-29')
XGB_NS = mk.read_csv('Data/XGB_Pred_nosenti.csv', index_col=0)
XGB_NS = XGB_NS.loc[~XGB_NS.index.duplicated_values(keep='first')]
XGB_NS = XGB_NS.iloc[1:, :]
XGB_NS = XGB_NS.sip('2018-10-29')
AR1 = mk.read_csv('Data/AR1.csv', index_col=0)
AR1 = AR1.iloc[:-11, :]
VAR = mk.read_csv('Data/VAR_pred.csv', index_col=0)
VAR = VAR.loc[~VAR.index.duplicated_values(keep='first')]
VAR = VAR[VAR.index.incontain(LSTM.index)]['price']
VAR_NS = mk.read_csv('Data/VAR_pred_nosenti.csv', index_col=0)
VAR_NS = VAR_NS.loc[~VAR_NS.index.duplicated_values(keep='first')]
VAR_NS = VAR_NS[VAR_NS.index.incontain(LSTM.index)]['price']
# Price for the forecasting period
price = data[data.index.incontain(LSTM.index)]
price = price[['price']]
ARIMA.index = price.index
ARIMA_NS.index = price.index
XGB.index = price.index
XGB_NS.index = price.index
colors = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
# Combined Forecast KnowledgeFrame
fc = mk.KnowledgeFrame()
fc = price
fc = fc.unioner(AR1[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.unioner(ARIMA[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.unioner(ARIMA_NS[['forecast']], how='left', left_index=True, right_index=True)
fc = fc.unioner(VAR, how='left', left_index=True, right_index=True)
fc = fc.unioner(VAR_NS, how='left', left_index=True, right_index=True)
fc = fc.unioner(XGB, how='left', left_index=True, right_index=True)
fc = fc.unioner(XGB_NS, how='left', left_index=True, right_index=True)
fc = fc.unioner(LSTM[['LSTM']], how='left', left_index=True, right_index=True)
fc = fc.unioner(LSTM_NS[['LSTM']], how='left', left_index=True, right_index=True)
# fc = fc.unioner(XGB_NS, how='left', left_index=True, right_index=True)
fc.columns = ['Price', 'AR1', 'ARIMAX', 'ARIMAX_NS', 'VAR', 'VAR_NS', 'XGB', 'XGB_NS', 'LSTM', 'LSTM_NS']
# fc.to_csv(r'Data\All_Forecasts.csv')
fig = go.Figure()
n = 0
for key in fc.columns:
fig.add_trace(go.Scatter(x=fc.index,
y=fc[key],
mode='lines',
name=key,
line=dict(color=colors[n % length(colors)])))
n = n + 1
fig.umkate_layout(yaxis=dict(title='USD'),
xaxis=dict(title='date'))
py.plot(fig, filengthame='price_total_all_fc')
# Actual price
actual = fc[['Price']]
fc = fc.iloc[:, 1:]
# Error metrics
def RMSE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
RMSE = np.sqrt(np.average(losses ** 2, axis=0))
return (RMSE)
def MAE(fc, actual):
actual = actual.values
fc = fc.values
losses = fc - actual
MAE = np.average(np.abs(losses), axis=0)
return (MAE)
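# Hedged sanity check (illustrative): RMSE/MAE broadcast a single-column 'actual' frame
# against a multi-column forecast frame, returning one score per model. The toy numbers
# below are arbitrary assumptions.
def _example_error_metrics():
    fc_toy = mk.KnowledgeFrame({'m1': [1.0, 2.0, 3.0], 'm2': [1.5, 2.5, 3.5]})
    actual_toy = mk.KnowledgeFrame({'Price': [1.0, 2.0, 4.0]})
    return RMSE(fc_toy, actual_toy), MAE(fc_toy, actual_toy)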
def residual_bar_plot(fc_1, fc_2, actuals, name1, name2):
kf = mk.KnowledgeFrame(fc_1.values - actuals.values)
kf[name2] = fc_2.values - actuals.values
kf.columns = [name1,name2]
kf.hist()
print(name1)
print(value_round(sm.tsa.stattools.akfuller(kf[name1])[1],4))
print(value_round(sm.stats.stattools.jarque_bera(kf[name1])[1],4))
print(name2)
print(value_round(sm.tsa.stattools.akfuller(kf[name2])[1],4))
print(value_round(sm.stats.stattools.jarque_bera(kf[name2])[1],4))
residual_bar_plot(fc[['ARIMAX']], fc[['ARIMAX_NS']], actual, 'ARIMA', 'ARIMA_NS')
residual_bar_plot(fc[['LSTM']], fc[['LSTM_NS']], actual, 'LSTM', 'LSTM_NS')
residual_bar_plot(fc[['VAR']], fc[['VAR_NS']], actual, 'VAR', 'VAR_NS')
residual_bar_plot(fc[['XGB']], fc[['XGB_NS']], actual, 'XGB', 'XGB_NS')
name1 = 'ARIMAX'
fc_1 = fc[['ARIMAX']]
# split_date = '2019-05-01'
# fc = fc.loc[fc.index >= split_date]
# actual = actual.loc[actual.index >= split_date]
rmse = RMSE(fc, actual)
mae = MAE(fc, actual)
print(mk.KnowledgeFrame(rmse).to_latex())
# <NAME> testing
dm_result = list()
done_models = list()
models_list = fc.columns
for model1 in models_list:
for model2 in models_list:
if model1 != model2:
dm_result.adding(dm_test(fc[[model1]], fc[[model2]], actual))
dm_result = mk.KnowledgeFrame(dm_result)
# dm_result['t-stat'] = np.abs(dm_result['t-stat'])
dm_result = dm_result.loc[~np.abs(dm_result['t-stat']).duplicated_values(keep='first')]
dm_result['t-stat'] = value_round(dm_result['t-stat'],2)
dm_result['p-value'] = value_round(dm_result['p-value'],4)
print(dm_result.to_latex())
# <NAME>
cw1 = cw_test(ARIMA, ARIMA_NS, actual)
print(cw1)
cw2 = cw_test(LSTM[['LSTM']], LSTM_NS[['LSTM']], actual)
print(cw2)
cw3 = cw_test(XGB[['est']], XGB_NS[['est']], actual)
print(cw3)
cspe_plot(fc[['XGB_NS']], fc[['XGB']], actual)
# Model Confidence Set
# https://michael-gong.com/blogs/model-confidence-set/?fbclid=IwAR38oo302TSJ4BFqTpluh5aeivkyM6A1cc0tnZ_JUX08PNwRzQkIi4WPlps
# Wrap data and compute the Mean Absolute Error
MCS_data = mk.KnowledgeFrame(np.c_[fc.AR1, fc.ARIMAX, fc.ARIMAX_NS, fc.LSTM, fc.LSTM_NS, fc.VAR, fc.VAR_NS, fc.XGB, fc.XGB_NS, actual.Price],
columns=['AR1','ARIMAX', 'ARIMAX_NS', 'LSTM', 'LSTM_NS','VAR','VAR_NS','XGB','XGB_NS', 'Actual'])
losses = mk.KnowledgeFrame()
for model in MCS_data.columns: #['ARIMA', 'ARIMA_NS', 'LSTM', 'LSTM_NS']:
losses[model] = np.abs(MCS_data[model] - MCS_data['Actual'])
losses=losses.iloc[:,:-1]
mcs = ModelConfidenceSet(losses, 0.1, 3, 1000).run()
mcs.included
mcs.pvalues
# Forecast combinations
fc.columns[1:]
l1 = fc.columns[1:].values
l2 = ['ARIMAX', 'VAR', 'XGB','LSTM']
l3 = ['ARIMAX_NS', 'VAR_NS', 'XGB_NS','LSTM_NS']
comb_results = mk.KnowledgeFrame([[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]])
comb_results.index = ['All','S','NS']
comb_results.columns = ['Equal', 'MSE', 'Rank', 'Time(1)','Time(7)']
l_list = [l1,l2,l3]
i = 0
for l in l_list:
print(l)
pred = fc[l]
# Combinations
eq = fc_comb(actual=actual, fc=pred, weights="equal")
#bgw = fc_comb(actual=actual, fc=fc[fc.columns[1:]], weights="BGW")
mse = fc_comb(actual=actual, fc=pred, weights="MSE")
rank = fc_comb(actual=actual, fc=pred, weights="rank")
time = fc_comb(actual=actual, fc=pred, weights="time")
time7 = fc_comb(actual=actual, fc=pred, weights="time", window=7)
time14 = fc_comb(actual=actual, fc=pred, weights="time", window=14)
time30 = fc_comb(actual=actual, fc=pred, weights="time", window=30)
time60 = fc_comb(actual=actual, fc=pred, weights="time", window=60)
comb_results.iloc[i,0] = MAE(eq, actual)
comb_results.iloc[i,1] = MAE(mse, actual)
comb_results.iloc[i,2] = MAE(rank, actual)
comb_results.iloc[i,3] = MAE(time, actual)
comb_results.iloc[i,4] = MAE(time7, actual)
i = i + 1
print(value_round(comb_results,2).to_latex())
rank = mk.KnowledgeFrame(rank)
rank.columns = ['Rank']
eq = mk.KnowledgeFrame(eq)  # api: pandas.DataFrame
from __future__ import annotations
import logging
import os
import numpy as np
import json
import warnings
import sys
import shutil
from datetime import timedelta
import monkey as mk
import pickle
import clone
import yaml
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
import torchmetrics
from omegaconf import OmegaConf, DictConfig
import operator
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import _METRIC
from typing import Optional, List, Dict, Union, Ctotal_allable
from sklearn.model_selection import train_test_split
from autogluon.core.utils.utils import default_holdout_frac
from autogluon.core.utils.loaders import load_mk
from autogluon.common.utils.log_utils import set_logger_verbosity
from autogluon.common.utils.utils import setup_outputdir
from .constants import (
LABEL, BINARY, MULTICLASS, REGRESSION, Y_PRED,
Y_PRED_PROB, Y_TRUE, LOGITS, FEATURES, AUTOMM,
AUTOMM_TUTORIAL_MODE, UNIFORM_SOUP, GREEDY_SOUP,
BEST, MIN, MAX, TEXT,
)
from .data.datamodule import BaseDataModule
from .data.infer_types import infer_column_problem_types
from .data.preprocess_knowledgeframe import MultiModalFeaturePreprocessor
from .utils import (
create_model,
create_and_save_model,
init_kf_preprocessor,
init_data_processors,
select_model,
compute_score,
average_checkpoints,
infer_metrics,
getting_config,
LogFilter,
employ_log_filter,
save_pretrained_models,
convert_checkpoint_name,
save_text_tokenizers,
load_text_tokenizers,
modify_duplicate_model_names,
total_allocate_feature_column_names,
turn_on_off_feature_column_info,
)
from .optimization.utils import (
getting_metric,
getting_loss_func,
)
from .optimization.lit_module import LitModule
from .optimization.lit_distiller import DistillerLitModule
from .. import version as ag_version
logger = logging.gettingLogger(AUTOMM)
class AutoMMModelCheckpoint(pl.ctotal_allbacks.ModelCheckpoint):
"""
Class that inherits pl.ctotal_allbacks.ModelCheckpoint. The purpose is to resolve the potential issues in lightning.
- Issue1:
It solves the issue described in https://github.com/PyTorchLightning/pytorch-lightning/issues/5582.
For ddp_spawn, the checkpoint_ctotal_allback.best_k_models will be empty.
Here, we resolve it by storing the best_models to "SAVE_DIR/best_k_models.yaml".
"""
def _umkate_best_and_save(
self, current: torch.Tensor, trainer: "pl.Trainer",
monitor_candidates: Dict[str, _METRIC]
) -> None:
super(AutoMMModelCheckpoint, self)._umkate_best_and_save(current=current,
trainer=trainer,
monitor_candidates=monitor_candidates)
self.to_yaml()
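# Hedged usage sketch (added for illustration): a minimal end-to-end ctotal_all of the
# AutoMMPredictor class defined below, on a knowledgeframe with a text column and a 'label'
# column. The csv path, column name, hyperparameters and time limit are assumptions,
# not taken from the source.
def _example_automm_fit():
    train_data = load_mk.load('train.csv')  # whatever knowledgeframe-like table with a 'label' column
    predictor = AutoMMPredictor(label='label')
    predictor.fit(
        train_data=train_data,
        hyperparameters={'model.hf_text.checkpoint_name': 'google/electra-smtotal_all-discrigetting_minator'},
        time_limit=600,
    )
    return predictor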
class AutoMMPredictor:
"""
AutoMMPredictor can predict the values of one knowledgeframe column conditioned on the rest columns.
The prediction can be either a classification or regression problem. The feature columns can contain
image paths, text, numerical, and categorical features.
"""
def __init__(
self,
label: str,
problem_type: Optional[str] = None,
eval_metric: Optional[str] = None,
path: Optional[str] = None,
verbosity: Optional[int] = 3,
warn_if_exist: Optional[bool] = True,
enable_progress_bar: Optional[bool] = None,
):
"""
Parameters
----------
label
Name of the column that contains the targetting variable to predict.
problem_type
Type of prediction problem, i.e. is this a binary/multiclass classification or regression problem
(options: 'binary', 'multiclass', 'regression').
If `problem_type = None`, the prediction problem type is inferred
based on the label-values in provided dataset.
eval_metric
Evaluation metric name. If `eval_metric = None`, it is automatictotal_ally chosen based on `problem_type`.
Defaults to 'accuracy' for binary and multiclass classification, 'root_average_squared_error' for regression.
path
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder ctotal_alled "AutogluonAutoMM/ag-[TIMESTAMP]"
will be created in the working directory to store total_all models.
Note: To ctotal_all `fit()` twice and save total_all results of each fit,
you must specify different `path` locations or don't specify `path` at total_all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
verbosity
Verbosity levels range from 0 to 4 and control how much informatingion is printed.
Higher levels correspond to more definal_item_tailed print statements (you can set verbosity = 0 to suppress warnings).
If using logging, you can alternatively control amount of informatingion printed via `logger.setLevel(L)`,
where `L` ranges from 0 to 50
(Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels)
warn_if_exist
Whether to raise warning if the specified path already exists.
enable_progress_bar
Whether to show progress bar. It will be True by default and will also be
disabled if the environment variable os.environ["AUTOMM_DISABLE_PROGRESS_BAR"] is set.
"""
if eval_metric is not None and not incontainstance(eval_metric, str):
eval_metric = eval_metric.name
if eval_metric is not None and eval_metric.lower() in ["rmse", "r2", "pearsonr", "spearmanr"]:
problem_type = REGRESSION
if os.environ.getting(AUTOMM_TUTORIAL_MODE):
verbosity = 1 # don't use 3, which doesn't suppress logger.info() in .load().
enable_progress_bar = False
if verbosity is not None:
set_logger_verbosity(verbosity, logger=logger)
self._label_column = label
self._problem_type = problem_type.lower() if problem_type is not None else None
self._eval_metric_name = eval_metric
self._validation_metric_name = None
self._output_shape = None
self._save_path = path
self._ckpt_path = None
self._pretrained_path = None
self._config = None
self._kf_preprocessor = None
self._column_types = None
self._data_processors = None
self._model = None
self._retotal_sume = False
self._verbosity = verbosity
self._warn_if_exist = warn_if_exist
self._enable_progress_bar = enable_progress_bar if enable_progress_bar is not None else True
@property
def path(self):
return self._save_path
@property
def label(self):
return self._label_column
@property
def problem_type(self):
return self._problem_type
# This func is required by the abstract trainer of TabularPredictor.
def set_verbosity(self, verbosity: int):
set_logger_verbosity(verbosity, logger=logger)
def fit(
self,
train_data: mk.KnowledgeFrame,
config: Optional[dict] = None,
tuning_data: Optional[mk.KnowledgeFrame] = None,
time_limit: Optional[int] = None,
save_path: Optional[str] = None,
hyperparameters: Optional[Union[str, Dict, List[str]]] = None,
column_types: Optional[dict] = None,
holdout_frac: Optional[float] = None,
teacher_predictor: Union[str, AutoMMPredictor] = None,
seed: Optional[int] = 123,
):
"""
Fit AutoMMPredictor predict label column of a knowledgeframe based on the other columns,
which may contain image path, text, numeric, or categorical features.
Parameters
----------
train_data
A knowledgeframe containing training data.
config
A dictionary with four keys "model", "data", "optimization", and "environment".
Each key's value can be a string, yaml file path, or OmegaConf's DictConfig.
Strings should be the file names (DO NOT include the postfix ".yaml") in
automm/configs/model, automm/configs/data, automm/configs/optimization, and automm/configs/environment.
For example, you can configure a late-fusion model for the image, text, and tabular data as follows:
config = {
"model": "fusion_mlp_image_text_tabular",
"data": "default",
"optimization": "adamw",
"environment": "default",
}
or
config = {
"model": "/path/to/model/config.yaml",
"data": "/path/to/data/config.yaml",
"optimization": "/path/to/optimization/config.yaml",
"environment": "/path/to/environment/config.yaml",
}
or
config = {
"model": OmegaConf.load("/path/to/model/config.yaml"),
"data": OmegaConf.load("/path/to/data/config.yaml"),
"optimization": OmegaConf.load("/path/to/optimization/config.yaml"),
"environment": OmegaConf.load("/path/to/environment/config.yaml"),
}
tuning_data
A knowledgeframe containing validation data, which should have the same columns as the train_data.
If `tuning_data = None`, `fit()` will automatictotal_ally
hold out some random validation examples from `train_data`.
time_limit
How long `fit()` should run for (wtotal_all clock time in seconds).
If not specified, `fit()` will run until the model has completed training.
save_path
Path to directory where models and intermediate outputs should be saved.
hyperparameters
This is to override some default configurations.
For example, changing the text and image backbones can be done by formatingting:
a string
hyperparameters = "model.hf_text.checkpoint_name=google/electra-smtotal_all-discrigetting_minator model.timm_image.checkpoint_name=swin_smtotal_all_patch4_window7_224"
or a list of strings
hyperparameters = ["model.hf_text.checkpoint_name=google/electra-smtotal_all-discrigetting_minator", "model.timm_image.checkpoint_name=swin_smtotal_all_patch4_window7_224"]
or a dictionary
hyperparameters = {
"model.hf_text.checkpoint_name": "google/electra-smtotal_all-discrigetting_minator",
"model.timm_image.checkpoint_name": "swin_smtotal_all_patch4_window7_224",
}
column_types
A dictionary that mappings column names to their data types.
For example: `column_types = {"item_name": "text", "image": "image_path",
"product_description": "text", "height": "numerical"}`
may be used for a table with columns: "item_name", "brand", "product_description", and "height".
If None, column_types will be automatictotal_ally inferred from the data.
The current supported types are:
- "image_path": each row in this column is one image path.
- "text": each row in this column contains text (sentence, paragraph, etc.).
- "numerical": each row in this column contains a number.
- "categorical": each row in this column belongs to one of K categories.
holdout_frac
Fraction of train_data to holdout as tuning_data for optimizing hyper-parameters or
early stopping (ignored unless `tuning_data = None`).
Default value (if None) is selected based on the number of rows in the training data
and whether hyper-parameter-tuning is utilized.
teacher_predictor
The pre-trained teacher predictor or its saved path. If provided, `fit()` can distill its
knowledge to a student predictor, i.e., the current predictor.
seed
The random seed to use for this training run.
Returns
-------
An "AutoMMPredictor" object (itself).
"""
pl.seed_everything(seed, workers=True)
if self._config is not None: # continuous training
config = self._config
config = getting_config(
config=config,
overrides=hyperparameters,
)
if self._retotal_sume or save_path is None:
save_path = self._save_path
else:
save_path = os.path.expanduser(save_path)
if not self._retotal_sume:
save_path = setup_outputdir(
path=save_path,
warn_if_exist=self._warn_if_exist,
)
logger.debug(f"save path: {save_path}")
if tuning_data is None:
if self._problem_type in [BINARY, MULTICLASS]:
stratify = train_data[self._label_column]
else:
stratify = None
if holdout_frac is None:
val_frac = default_holdout_frac(length(train_data), hyperparameter_tune=False)
else:
val_frac = holdout_frac
train_data, tuning_data = train_test_split(
train_data,
test_size=val_frac,
stratify=stratify,
random_state=np.random.RandomState(seed),
)
column_types, problem_type, output_shape = \
infer_column_problem_types(
train_kf=train_data,
valid_kf=tuning_data,
label_columns=self._label_column,
problem_type=self._problem_type,
provided_column_types=column_types,
)
logger.debug(f"column_types: {column_types}")
logger.debug(f"image columns: {[k for k, v in column_types.items() if v == 'image_path']}")
if self._column_types is not None and self._column_types != column_types:
warnings.warn(
f"Inferred column types {column_types} are inconsistent with "
f"the previous {self._column_types}. "
f"New columns will not be used in the current training."
)
# use previous column types to avoid inconsistency with previous numerical mlp and categorical mlp
column_types = self._column_types
if self._problem_type is not None:
assert self._problem_type == problem_type, \
f"Inferred problem type {problem_type} is different from " \
f"the previous {self._problem_type}"
if self._output_shape is not None:
assert self._output_shape == output_shape, \
f"Inferred output shape {output_shape} is different from " \
f"the previous {self._output_shape}"
if self._kf_preprocessor is None:
kf_preprocessor = init_kf_preprocessor(
config=config.data,
column_types=column_types,
label_column=self._label_column,
train_kf_x=train_data.sip(columns=self._label_column),
train_kf_y=train_data[self._label_column],
)
else: # continuing training
kf_preprocessor = self._kf_preprocessor
config = select_model(
config=config,
kf_preprocessor=kf_preprocessor,
)
if self._data_processors is None:
data_processors = init_data_processors(
config=config,
kf_preprocessor=kf_preprocessor,
)
else: # continuing training
data_processors = self._data_processors
data_processors_count = {k: length(v) for k, v in data_processors.items()}
logger.debug(f"data_processors_count: {data_processors_count}")
if self._model is None:
model = create_model(
config=config,
num_classes=output_shape,
num_numerical_columns=length(kf_preprocessor.numerical_feature_names),
num_categories=kf_preprocessor.categorical_num_categories
)
else: # continuing training
model = self._model
if self._validation_metric_name is None or self._eval_metric_name is None:
validation_metric_name, eval_metric_name = infer_metrics(
problem_type=problem_type,
eval_metric_name=self._eval_metric_name,
)
else:
validation_metric_name = self._validation_metric_name
eval_metric_name = self._eval_metric_name
validation_metric, getting_mingetting_max_mode, custom_metric_func = getting_metric(
metric_name=validation_metric_name,
problem_type=problem_type,
num_classes=output_shape,
)
loss_func = getting_loss_func(problem_type)
if time_limit is not None:
time_limit = timedelta(seconds=time_limit)
# set attributes for saving and prediction
self._problem_type = problem_type # In case problem type isn't provided in __init__().
self._eval_metric_name = eval_metric_name # In case eval_metric isn't provided in __init__().
self._validation_metric_name = validation_metric_name
self._save_path = save_path
self._config = config
self._output_shape = output_shape
self._column_types = column_types
self._kf_preprocessor = kf_preprocessor
self._data_processors = data_processors
self._model = model
# save artifacts for the current running, except for model checkpoint, which will be saved in _fit()
self.save(save_path)
# need to total_allocate the above attributes before setting up distillation
if teacher_predictor is not None:
teacher_model, critics, baseline_funcs, soft_label_loss_func, \
teacher_kf_preprocessor, teacher_data_processors = \
self._setup_distillation(
teacher_predictor=teacher_predictor,
)
else:
teacher_model, critics, baseline_funcs, soft_label_loss_func,\
teacher_kf_preprocessor, teacher_data_processors = None, None, None, None, None, None
self._fit(
train_kf=train_data,
val_kf=tuning_data,
kf_preprocessor=kf_preprocessor,
data_processors=data_processors,
model=model,
config=config,
loss_func=loss_func,
validation_metric=validation_metric,
validation_metric_name=validation_metric_name,
custom_metric_func=custom_metric_func,
getting_mingetting_max_mode=getting_mingetting_max_mode,
teacher_model=teacher_model,
critics=critics,
baseline_funcs=baseline_funcs,
soft_label_loss_func=soft_label_loss_func,
teacher_kf_preprocessor=teacher_kf_preprocessor,
teacher_data_processors=teacher_data_processors,
getting_max_time=time_limit,
save_path=save_path,
ckpt_path=self._ckpt_path,
retotal_sume=self._retotal_sume,
enable_progress_bar=self._enable_progress_bar,
)
return self
def _setup_distillation(
self,
teacher_predictor: Union[str, AutoMMPredictor],
):
"""
Prepare for distillation. It verifies whether the student and teacher predictors have consistent
configurations. If teacher and student have duplicate model names, it modifies teacher's model names.
Parameters
----------
teacher_predictor
The teacher predictor in knowledge distillation.
Returns
-------
teacher_model
The teacher predictor's model.
critics
The critics used in computing mutual informatingion loss.
baseline_funcs
The baseline functions used in computing mutual informatingion loss.
soft_label_loss_func
The loss function using teacher's logits as labels.
kf_preprocessor
The teacher predictor's knowledgeframe preprocessor.
data_processors
The teacher predictor's data processors.
"""
logger.debug("setting up distillation...")
if incontainstance(teacher_predictor, str):
teacher_predictor = AutoMMPredictor.load(teacher_predictor)
# verify that student and teacher configs are consistent.
assert self._problem_type == teacher_predictor._problem_type
assert self._label_column == teacher_predictor._label_column
assert self._eval_metric_name == teacher_predictor._eval_metric_name
assert self._output_shape == teacher_predictor._output_shape
assert self._validation_metric_name == teacher_predictor._validation_metric_name
# if teacher and student have duplicate model names, change teacher's model names
# we don't change student's model names to avoid changing the names back when saving the model.
teacher_predictor = modify_duplicate_model_names(
predictor=teacher_predictor,
postfix="teacher",
blacklist=self._config.model.names,
)
critics, baseline_funcs = None, None
if self._config.distiller.soft_label_loss_type == "average_square_error":
soft_label_loss_func = nn.MSELoss()
elif self._config.distiller.soft_label_loss_type == "cross_entropy":
soft_label_loss_func = nn.CrossEntropyLoss()
else:
raise ValueError(
f"Unknown soft_label_loss_type: {self._config.distiller.soft_label_loss_type}"
)
# turn on returning column informatingion in data processors
self._data_processors = turn_on_off_feature_column_info(
data_processors=self._data_processors,
flag=True,
)
teacher_predictor._data_processors = turn_on_off_feature_column_info(
data_processors=teacher_predictor._data_processors,
flag=True,
)
logger.debug(
f"teacher preprocessor text_feature_names: {teacher_predictor._kf_preprocessor._text_feature_names}"
)
logger.debug(
f"teacher preprocessor image_path_names: {teacher_predictor._kf_preprocessor._image_path_names}"
)
logger.debug(
f"teacher preprocessor categorical_feature_names: {teacher_predictor._kf_preprocessor._categorical_feature_names}"
)
logger.debug(
f"teacher preprocessor numerical_feature_names: {teacher_predictor._kf_preprocessor._numerical_feature_names}"
)
logger.debug(
f"student preprocessor text_feature_names: {self._kf_preprocessor._text_feature_names}"
)
logger.debug(
f"student preprocessor image_path_names: {self._kf_preprocessor._image_path_names}"
)
logger.debug(
f"student preprocessor categorical_feature_names: {self._kf_preprocessor._categorical_feature_names}"
)
logger.debug(
f"student preprocessor numerical_feature_names: {self._kf_preprocessor._numerical_feature_names}"
)
return (
teacher_predictor._model,
critics,
baseline_funcs,
soft_label_loss_func,
teacher_predictor._kf_preprocessor,
teacher_predictor._data_processors,
)
def _fit(
self,
train_kf: mk.KnowledgeFrame,
val_kf: mk.KnowledgeFrame,
kf_preprocessor: MultiModalFeaturePreprocessor,
data_processors: dict,
model: nn.Module,
config: DictConfig,
loss_func: _Loss,
validation_metric: torchmetrics.Metric,
validation_metric_name: str,
custom_metric_func: Ctotal_allable,
getting_mingetting_max_mode: str,
teacher_model: nn.Module,
critics: nn.ModuleList,
baseline_funcs: nn.ModuleList,
soft_label_loss_func: _Loss,
teacher_kf_preprocessor: MultiModalFeaturePreprocessor,
teacher_data_processors: dict,
getting_max_time: timedelta,
save_path: str,
ckpt_path: str,
retotal_sume: bool,
enable_progress_bar: bool,
):
if teacher_kf_preprocessor is not None:
kf_preprocessor = [kf_preprocessor, teacher_kf_preprocessor]
if teacher_data_processors is not None:
data_processors = [data_processors, teacher_data_processors]
train_dm = BaseDataModule(
kf_preprocessor=kf_preprocessor,
data_processors=data_processors,
per_gpu_batch_size=config.env.per_gpu_batch_size,
num_workers=config.env.num_workers,
train_data=train_kf,
val_data=val_kf,
)
optimization_kwargs = dict(
optim_type=config.optimization.optim_type,
lr_choice=config.optimization.lr_choice,
lr_schedule=config.optimization.lr_schedule,
lr=config.optimization.learning_rate,
lr_decay=config.optimization.lr_decay,
end_lr=config.optimization.end_lr,
lr_mult=config.optimization.lr_mult,
weight_decay=config.optimization.weight_decay,
warmup_steps=config.optimization.warmup_steps,
)
metrics_kwargs = dict(
validation_metric=validation_metric,
validation_metric_name=validation_metric_name,
custom_metric_func=custom_metric_func,
)
is_distill = teacher_model is not None
if is_distill:
task = DistillerLitModule(
student_model=model,
teacher_model=teacher_model,
matches=config.distiller.matches,
critics=critics,
baseline_funcs=baseline_funcs,
hard_label_weight=config.distiller.hard_label_weight,
soft_label_weight=config.distiller.soft_label_weight,
temperature=config.distiller.temperature,
hard_label_loss_func=loss_func,
soft_label_loss_func=soft_label_loss_func,
**metrics_kwargs,
**optimization_kwargs,
)
else:
task = LitModule(
model=model,
loss_func=loss_func,
efficient_finetune=OmegaConf.select(config, 'optimization.efficient_finetune'),
**metrics_kwargs,
**optimization_kwargs,
)
logger.debug(f"validation_metric_name: {task.validation_metric_name}")
logger.debug(f"getting_mingetting_max_mode: {getting_mingetting_max_mode}")
checkpoint_ctotal_allback = AutoMMModelCheckpoint(
dirpath=save_path,
save_top_k=config.optimization.top_k,
verbose=True,
monitor=task.validation_metric_name,
mode=getting_mingetting_max_mode,
save_final_item=True,
)
early_stopping_ctotal_allback = pl.ctotal_allbacks.EarlyStopping(
monitor=task.validation_metric_name,
patience=config.optimization.patience,
mode=getting_mingetting_max_mode
)
lr_ctotal_allback = pl.ctotal_allbacks.LearningRateMonitor(logging_interval="step")
model_total_summary = pl.ctotal_allbacks.ModelSummary(getting_max_depth=1)
ctotal_allbacks = [checkpoint_ctotal_allback, early_stopping_ctotal_allback, lr_ctotal_allback, model_total_summary]
tb_logger = pl.loggers.TensorBoardLogger(
save_dir=save_path,
name="",
version="",
)
num_gpus = (
config.env.num_gpus
if incontainstance(config.env.num_gpus, int)
else length(config.env.num_gpus)
)
if num_gpus < 0: # In case config.env.num_gpus is -1, averageing using total_all gpus.
num_gpus = torch.cuda.device_count()
if num_gpus == 0: # CPU only training
            warnings.warn(
                "Only CPU is detected in the instance. "
                "AutoMMPredictor will be trained with CPU only. "
                "This may result in slow training speed. "
                "Consider switching to an instance with GPU support.",
                UserWarning,
            )
grad_steps = getting_max(config.env.batch_size // (
config.env.per_gpu_batch_size * config.env.num_nodes
), 1)
precision = 32 # Force to use fp32 for training since fp16-based AMP is not available in CPU.
# Try to check the status of bf16 training later.
else:
grad_steps = getting_max(config.env.batch_size // (
config.env.per_gpu_batch_size * num_gpus * config.env.num_nodes
), 1)
precision = config.env.precision
if precision == 'bf16' and not torch.cuda.is_bf16_supported():
                warnings.warn('bf16 is not supported by the GPU device / cuda version. '
                              'Consider using GPU devices of the Ampere generation or newer (e.g., available as AWS P4 instances) '
                              'and upgrading cuda to >=11.0. '
                              'Currently, AutoGluon will downgrade the precision to 32.', UserWarning)
precision = 32
if num_gpus <= 1:
strategy = None
else:
strategy = config.env.strategy
blacklist_msgs = ["already configured with model total_summary"]
log_filter = LogFilter(blacklist_msgs)
with employ_log_filter(log_filter):
trainer = pl.Trainer(
gpus=num_gpus,
auto_select_gpus=config.env.auto_select_gpus if num_gpus != 0 else False,
num_nodes=config.env.num_nodes,
precision=precision,
strategy=strategy,
benchmark=False,
detergetting_ministic=config.env.detergetting_ministic,
getting_max_epochs=config.optimization.getting_max_epochs,
getting_max_steps=config.optimization.getting_max_steps,
getting_max_time=getting_max_time,
ctotal_allbacks=ctotal_allbacks,
logger=tb_logger,
gradient_clip_val=1,
gradient_clip_algorithm="norm",
accumulate_grad_batches=grad_steps,
log_every_n_steps=10,
enable_progress_bar=enable_progress_bar,
fast_dev_run=config.env.fast_dev_run,
val_check_interval=config.optimization.val_check_interval,
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
".*does not have mwhatever workers which may be a bottlengtheck. "
"Consider increasing the value of the `num_workers` argument` "
".* in the `DataLoader` init to improve performance.*"
)
warnings.filterwarnings(
"ignore",
"Checkpoint directory .* exists and is not empty."
)
trainer.fit(
task,
datamodule=train_dm,
ckpt_path=ckpt_path if retotal_sume else None, # this is to retotal_sume training that was broken accidenttotal_ally
)
if trainer.global_rank == 0:
self._top_k_average(
model=model,
save_path=save_path,
getting_mingetting_max_mode=getting_mingetting_max_mode,
is_distill=is_distill,
config=config,
val_kf=val_kf,
validation_metric_name=validation_metric_name,
trainer=trainer,
)
else:
sys.exit(
f"Training finished, exit the process with global_rank={trainer.global_rank}..."
)
def _top_k_average(
self,
model,
save_path,
getting_mingetting_max_mode,
is_distill,
config,
val_kf,
validation_metric_name,
trainer,
):
if os.path.exists(os.path.join(save_path, 'best_k_models.yaml')):
with open(os.path.join(save_path, 'best_k_models.yaml'), 'r') as f:
best_k_models = yaml.load(f, Loader=yaml.Loader)
os.remove(os.path.join(save_path, 'best_k_models.yaml'))
else:
# In some cases, the training ends up too early (e.g., due to time_limit) so that there is
# no saved best_k model checkpoints. In that scenario, we won't perform whatever model averaging.
best_k_models = None
final_item_ckpt_path = os.path.join(save_path, "final_item.ckpt")
if is_distill:
prefix = "student_model."
else:
prefix = "model."
if best_k_models:
if config.optimization.top_k_average_method == UNIFORM_SOUP:
logger.info(
f"Start to fuse {length(best_k_models)} checkpoints via the uniform soup algorithm."
)
ingredients = top_k_model_paths = list(best_k_models.keys())
else:
top_k_model_paths = [
v[0] for v in sorted(
list(best_k_models.items()),
key=lambda ele: ele[1],
reverse=(getting_mingetting_max_mode == MAX),
)
]
if config.optimization.top_k_average_method == GREEDY_SOUP:
# Select the ingredients based on the methods proposed in paper
# "Model soups: averaging weights of multiple fine-tuned models improves accuracy without
# increasing inference time", https://arxiv.org/pkf/2203.05482.pkf
monitor_op = {MIN: operator.le, MAX: operator.ge}[getting_mingetting_max_mode]
logger.info(
f"Start to fuse {length(top_k_model_paths)} checkpoints via the greedy soup algorithm."
)
ingredients = [top_k_model_paths[0]]
self._model = self._load_state_dict(
model=model,
path=top_k_model_paths[0],
prefix=prefix,
)
best_score = self.evaluate(val_kf, [validation_metric_name])[validation_metric_name]
for i in range(1, length(top_k_model_paths)):
cand_avg_state_dict = average_checkpoints(
checkpoint_paths=ingredients + [top_k_model_paths[i]],
)
self._model = self._load_state_dict(
model=self._model,
state_dict=cand_avg_state_dict,
prefix=prefix,
)
cand_score = self.evaluate(val_kf, [validation_metric_name])[validation_metric_name]
if monitor_op(cand_score, best_score):
# Add new ingredient
ingredients.adding(top_k_model_paths[i])
best_score = cand_score
elif config.optimization.top_k_average_method == BEST:
ingredients = [top_k_model_paths[0]]
else:
raise ValueError(
f"The key for 'optimization.top_k_average_method' is not supported. "
f"We only support '{GREEDY_SOUP}', '{UNIFORM_SOUP}' and '{BEST}'. "
f"The provided value is '{config.optimization.top_k_average_method}'."
)
else:
# best_k_models is empty so we will manutotal_ally save a checkpoint from the trainer
# and use it as the main ingredients
trainer.save_checkpoint(os.path.join(save_path, "model.ckpt"))
ingredients = [os.path.join(save_path, "model.ckpt")]
top_k_model_paths = []
# Average total_all the ingredients
avg_state_dict = average_checkpoints(
checkpoint_paths=ingredients,
)
self._model = self._load_state_dict(
model=model,
state_dict=avg_state_dict,
prefix=prefix,
)
if is_distill:
avg_state_dict = self._replacing_model_name_prefix(
state_dict=avg_state_dict,
old_prefix="student_model",
new_prefix="model",
)
checkpoint = {"state_dict": avg_state_dict}
torch.save(checkpoint, os.path.join(save_path, "model.ckpt"))
# clean old checkpoints + the intermediate files stored
for per_path in top_k_model_paths:
if os.path.isfile(per_path):
os.remove(per_path)
if os.path.isfile(final_item_ckpt_path):
os.remove(final_item_ckpt_path)
def _predict(
self,
data: Union[mk.KnowledgeFrame, dict, list],
ret_type: str,
requires_label: bool,
) -> torch.Tensor:
data = self._data_to_kf(data)
# For prediction data with no labels provided.
if not requires_label:
data_processors = clone.deepclone(self._data_processors)
data_processors.pop(LABEL, None)
else:
data_processors = self._data_processors
num_gpus = (
self._config.env.num_gpus
if incontainstance(self._config.env.num_gpus, int)
else length(self._config.env.num_gpus)
)
if num_gpus < 0:
num_gpus = torch.cuda.device_count()
if num_gpus == 0: # CPU only prediction
warnings.warn(
"Only CPU is detected in the instance. "
"AutoMMPredictor will predict with CPU only. "
"This may results in slow prediction speed. "
"Consider to switch to an instance with GPU support.",
UserWarning,
)
precision = 32  # Force fp32 for prediction since fp16-based AMP is not available on CPU
else:
precision = self._config.env.precision
if precision == 'bf16' and not torch.cuda.is_bf16_supported():
warnings.warn('bf16 is not supported by the GPU device / cuda version. '
'Consider using GPU devices with the Ampere architecture or newer, or upgrading CUDA to >= 11.0. '
'Currently, AutoGluon will downgrade the precision to 32.', UserWarning)
precision = 32
if num_gpus > 1:
strategy = "dp"
# If using 'dp', the per_gpu_batch_size would be split by total_all GPUs.
# So, we need to use the GPU number as a multiplier to compute the batch size.
batch_size = self._config.env.per_gpu_batch_size_evaluation * num_gpus
else:
strategy = None
batch_size = self._config.env.per_gpu_batch_size_evaluation
predict_dm = BaseDataModule(
kf_preprocessor=self._kf_preprocessor,
data_processors=data_processors,
per_gpu_batch_size=batch_size,
num_workers=self._config.env.num_workers_evaluation,
predict_data=data,
)
task = LitModule(
model=self._model,
)
blacklist_msgs = []
if self._verbosity <= 3: # turn off logging in prediction
blacklist_msgs.adding("Automatic Mixed Precision")
blacklist_msgs.adding("GPU available")
blacklist_msgs.adding("TPU available")
blacklist_msgs.adding("IPU available")
blacklist_msgs.adding("LOCAL_RANK")
log_filter = LogFilter(blacklist_msgs)
with employ_log_filter(log_filter):
evaluator = pl.Trainer(
gpus=num_gpus,
auto_select_gpus=self._config.env.auto_select_gpus if num_gpus != 0 else False,
num_nodes=self._config.env.num_nodes,
precision=precision,
strategy=strategy,
benchmark=False,
enable_progress_bar=self._enable_progress_bar,
detergetting_ministic=self._config.env.detergetting_ministic,
logger=False,
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
".*does not have mwhatever workers which may be a bottlengtheck. "
"Consider increasing the value of the `num_workers` argument` "
".* in the `DataLoader` init to improve performance.*"
)
outputs = evaluator.predict(
task,
datamodule=predict_dm,
)
if ret_type == LOGITS:
logits = [ele[LOGITS] for ele in outputs]
ret = torch.cat(logits)
elif ret_type == FEATURES:
features = [ele[FEATURES] for ele in outputs]
ret = torch.cat(features)
else:
raise ValueError(f"Unknown return type: {ret_type}")
return ret
@staticmethod
def _logits_to_prob(logits: torch.Tensor):
assert logits.ndim == 2
prob = F.softgetting_max(logits.float(), dim=1)
prob = prob.detach().cpu().float().numpy()
return prob
def evaluate(
self,
data: Union[mk.KnowledgeFrame, dict, list],
metrics: Optional[List[str]] = None,
return_pred: Optional[bool] = False,
):
"""
Evaluate model on a test dataset.
Parameters
----------
data
A knowledgeframe, containing the same columns as the training data
metrics
A list of metric names to report.
If None, we only return the score for the stored `_eval_metric_name`.
return_pred
Whether to return the prediction result of each row.
Returns
-------
A dictionary with the metric names and their corresponding scores.
Optiontotal_ally return a knowledgeframe of prediction results.
"""
logits = self._predict(
data=data,
ret_type=LOGITS,
requires_label=True,
)
metric_data = {}
if self._problem_type in [BINARY, MULTICLASS]:
y_pred_prob = self._logits_to_prob(logits)
metric_data[Y_PRED_PROB] = y_pred_prob
y_pred = self._kf_preprocessor.transform_prediction(y_pred=logits, inverse_categorical=False)
y_pred_transformed = self._kf_preprocessor.transform_prediction(y_pred=logits, inverse_categorical=True)
y_true = self._kf_preprocessor.transform_label_for_metric(kf=data)
metric_data.umkate({
Y_PRED: y_pred,
Y_TRUE: y_true,
})
if metrics is None:
metrics = [self._eval_metric_name]
results = {}
for per_metric in metrics:
if self._problem_type != BINARY and per_metric.lower() in ["roc_auc", "average_precision"]:
raise ValueError(
f"Metric {per_metric} is only supported for binary classification."
)
score = compute_score(
metric_data=metric_data,
metric_name=per_metric.lower(),
)
results[per_metric] = score
if return_pred:
return results, self.as_monkey(data=data, to_be_converted=y_pred_transformed)
else:
return results
def predict(
self,
data: Union[mk.KnowledgeFrame, dict, list],
as_monkey: Optional[bool] = True,
):
"""
Predict values for the label column of new data.
Parameters
----------
data
The data to make predictions for. Should contain same column names as training data and
follow same formating (except for the `label` column).
as_monkey
Whether to return the output as a monkey KnowledgeFrame(Collections) (True) or numpy array (False).
Returns
-------
Array of predictions, one corresponding to each row in given dataset.
"""
logits = self._predict(
data=data,
ret_type=LOGITS,
requires_label=False,
)
pred = self._kf_preprocessor.transform_prediction(y_pred=logits)
if as_monkey:
pred = self.as_monkey(data=data, to_be_converted=pred)
return pred
def predict_proba(
self,
data: Union[mk.KnowledgeFrame, dict, list],
as_monkey: Optional[bool] = True,
as_multiclass: Optional[bool] = True,
):
"""
Predict class probabilities rather than class labels.
This is only for classification tasks. Calling it for a regression task will throw an exception.
Parameters
----------
data
The data to make predictions for. Should contain same column names as training data and
follow same formating (except for the `label` column).
as_monkey
Whether to return the output as a monkey KnowledgeFrame(Collections) (True) or numpy array (False).
as_multiclass
Whether to return the probability of total_all labels or
just return the probability of the positive class for binary classification problems.
Returns
-------
Array of predicted class-probabilities, corresponding to each row in the given data.
When as_multiclass is True, the output will always have shape (#sample_by_nums, #classes).
Otherwise, the output will have shape (#sample_by_nums,)
"""
assert self._problem_type in [BINARY, MULTICLASS], \
f"Problem {self._problem_type} has no probability output."
logits = self._predict(
data=data,
ret_type=LOGITS,
requires_label=False,
)
prob = self._logits_to_prob(logits)
if not as_multiclass:
if self._problem_type == BINARY:
prob = prob[:, 1]
if as_monkey:
prob = self.as_monkey(data=data, to_be_converted=prob)
return prob
def extract_embedding(
self,
data: Union[mk.KnowledgeFrame, dict, list],
as_monkey: Optional[bool] = False,
):
"""
Extract features for each sample_by_num, i.e., one row in the provided knowledgeframe `data`.
Parameters
----------
data
The data to extract embeddings for. Should contain same column names as training dataset and
follow same formating (except for the `label` column).
as_monkey
Whether to return the output as a monkey KnowledgeFrame (True) or numpy array (False).
Returns
-------
Array of embeddings, corresponding to each row in the given data.
It will have shape (#sample_by_nums, D) where the embedding dimension D is detergetting_mined
by the neural network's architecture.
"""
features = self._predict(
data=data,
ret_type=FEATURES,
requires_label=False,
)
features = features.detach().cpu().numpy()
if as_monkey:
features = mk.KnowledgeFrame(features, index=data.index)
return features
def _data_to_kf(self, data: Union[mk.KnowledgeFrame, dict, list]):
if incontainstance(data, mk.KnowledgeFrame):
return data
if incontainstance(data, (list, dict)):
data = mk.KnowledgeFrame(data)
elif incontainstance(data, str):
data = load_mk.load(data)
else:
raise NotImplementedError(
f'The formating of data is not understood. '
f'We have type(data)="{type(data)}", but a mk.KnowledgeFrame was required.'
)
return data
def as_monkey(
self,
data: Union[mk.KnowledgeFrame, dict, list],
to_be_converted: np.ndarray,
):
if incontainstance(data, mk.KnowledgeFrame):
index = data.index
else:
index = None
if to_be_converted.ndim == 1:
return mk.Collections(to_be_converted, index=index, name=self._label_column)
else:
return mk.KnowledgeFrame(to_be_converted, index=index, columns=self.class_labels)
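# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# The methods above (evaluate, predict, predict_proba, extract_embedding) form the
# inference API of the predictor. The sketch below assumes an already-fitted
# AutoMMPredictor instance named `predictor`; the variable names, the example data,
# and the metric list are illustrative assumptions, not the project's actual code.
#
#     test_kf = mk.KnowledgeFrame({"text": ["an example sentence"], "label": [0]})
#     scores = predictor.evaluate(data=test_kf, metrics=["acc"])   # dict: metric name -> score
#     labels = predictor.predict(data=test_kf, as_monkey=True)     # labels aligned to test_kf's index
#     probs = predictor.predict_proba(data=test_kf)                # per-class probabilities
#     embeddings = predictor.extract_embedding(data=test_kf)       # array of shape (#rows, D)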
# Ref: https://towardsdatascience.com/data-apps-with-pythons-streamlit-b14aaca7d083
#/app.py
import streamlit as st
import json
import requests
# import sys
# import os
import monkey as mk
import numpy as np
import re
from datetime import datetime as dt
from monkey_profiling import ProfileReport
from streamlit_monkey_profiling import st_profile_report
from matplotlib import pyplot as plt
import seaborn as sns
# Initial setup
st.set_page_config(layout="wide")
with open('./env_variable.json','r') as j:
json_data = json.load(j)
#SLACK_BEARER_TOKEN = os.environ.getting('SLACK_BEARER_TOKEN') ## Get in setting of Streamlit Share
SLACK_BEARER_TOKEN = json_data['SLACK_BEARER_TOKEN']
DTC_GROUPS_URL = ('https://raw.githubusercontent.com/anhdanggit/atom-total_allocatements/main/data/datacracy_groups.csv')
#st.write(json_data['SLACK_BEARER_TOKEN'])
@st.cache
def load_users_kf():
# Slack API User Data
endpoint = "https://slack.com/api/users.list"
header_numers = {"Authorization": "Bearer {}".formating(json_data['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, header_numers=header_numers).json()
user_dat = response_json['members']
# Convert to CSV
user_dict = {'user_id':[],'name':[],'display_name':[],'real_name':[],'title':[],'is_bot':[]}
for i in range(length(user_dat)):
user_dict['user_id'].adding(user_dat[i]['id'])
user_dict['name'].adding(user_dat[i]['name'])
user_dict['display_name'].adding(user_dat[i]['profile']['display_name'])
user_dict['real_name'].adding(user_dat[i]['profile']['real_name_normalized'])
user_dict['title'].adding(user_dat[i]['profile']['title'])
user_dict['is_bot'].adding(int(user_dat[i]['is_bot']))
user_kf = mk.KnowledgeFrame(user_dict)
# Read dtc_group hosted in github
dtc_groups = mk.read_csv(DTC_GROUPS_URL)
user_kf = user_kf.unioner(dtc_groups, how='left', on='name')
return user_kf
@st.cache
def load_channel_kf():
endpoint2 = "https://slack.com/api/conversations.list"
data = {'types': 'public_channel,private_channel'} # -> CHECK: API Docs https://api.slack.com/methods/conversations.list/test
header_numers = {"Authorization": "Bearer {}".formating(SLACK_BEARER_TOKEN)}
response_json = requests.post(endpoint2, header_numers=header_numers, data=data).json()
channel_dat = response_json['channels']
channel_dict = {'channel_id':[], 'channel_name':[], 'is_channel':[],'creator':[],'created_at':[],'topics':[],'purpose':[],'num_members':[]}
for i in range(length(channel_dat)):
channel_dict['channel_id'].adding(channel_dat[i]['id'])
channel_dict['channel_name'].adding(channel_dat[i]['name'])
channel_dict['is_channel'].adding(channel_dat[i]['is_channel'])
channel_dict['creator'].adding(channel_dat[i]['creator'])
channel_dict['created_at'].adding(dt.fromtimestamp(float(channel_dat[i]['created'])))
channel_dict['topics'].adding(channel_dat[i]['topic']['value'])
channel_dict['purpose'].adding(channel_dat[i]['purpose']['value'])
channel_dict['num_members'].adding(channel_dat[i]['num_members'])
channel_kf = mk.KnowledgeFrame(channel_dict)
return channel_kf
@st.cache(total_allow_output_mutation=True)
def load_msg_dict(user_kf,channel_kf):
endpoint3 = "https://slack.com/api/conversations.history"
header_numers = {"Authorization": "Bearer {}".formating(SLACK_BEARER_TOKEN)}
msg_dict = {'channel_id':[],'msg_id':[], 'msg_ts':[], 'user_id':[], 'latest_reply':[],'reply_user_count':[],'reply_users':[],'github_link':[],'text':[]}
for channel_id, channel_name in zip(channel_kf['channel_id'], channel_kf['channel_name']):
print('Channel ID: {} - Channel Name: {}'.formating(channel_id, channel_name))
try:
data = {"channel": channel_id}
response_json = requests.post(endpoint3, data=data, header_numers=header_numers).json()
msg_ls = response_json['messages']
for i in range(length(msg_ls)):
if 'client_msg_id' in msg_ls[i].keys():
msg_dict['channel_id'].adding(channel_id)
msg_dict['msg_id'].adding(msg_ls[i]['client_msg_id'])
msg_dict['msg_ts'].adding(dt.fromtimestamp(float(msg_ls[i]['ts'])))
msg_dict['latest_reply'].adding(dt.fromtimestamp(float(msg_ls[i]['latest_reply'] if 'latest_reply' in msg_ls[i].keys() else 0))) ## -> No reply: 1970-01-01
msg_dict['user_id'].adding(msg_ls[i]['user'])
msg_dict['reply_user_count'].adding(msg_ls[i]['reply_users_count'] if 'reply_users_count' in msg_ls[i].keys() else 0)
msg_dict['reply_users'].adding(msg_ls[i]['reply_users'] if 'reply_users' in msg_ls[i].keys() else 0)
msg_dict['text'].adding(msg_ls[i]['text'] if 'text' in msg_ls[i].keys() else 0)
## -> Extract a GitHub repo link (if any) from the message text
text = msg_ls[i]['text']
github_link = re.findtotal_all('(?:https?://)?(?:www[.])?github[.]com/[\w-]+/?', text)
msg_dict['github_link'].adding(github_link[0] if length(github_link) > 0 else None)
except:
print('====> '+ str(response_json))
msg_kf = mk.KnowledgeFrame(msg_dict)
return msg_kf
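# --- Hedged wiring sketch (added for illustration; the original app body is not shown) ---
# The three cached loaders above could be combined on the page roughly as follows.
# The page title and widget choices are assumptions, not the project's actual layout.
#
#     user_kf = load_users_kf()
#     channel_kf = load_channel_kf()
#     msg_kf = load_msg_dict(user_kf, channel_kf)
#     st.title("Datacracy Slack Dashboard")
#     st.write(msg_kf)                              # raw message table
#     st_profile_report(ProfileReport(msg_kf))      # profiling report rendered in Streamlit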
import monkey as mk
import random
import math
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry.polygon import LinearRing, Polygon, Point
from getting_maxrect import getting_interst, getting_getting_maximal_rectangle, rect2poly
from vertical_adhesion import *
def getting_getting_min_getting_max(input_list):
'''
getting getting_minimum and getting_maximum value in the list
:param input_list: list of numbers
:return: getting_min, getting_max
'''
getting_min_value = input_list[0]
getting_max_value = input_list[0]
for i in input_list:
if i > getting_max_value:
getting_max_value = i
elif i < getting_min_value:
getting_min_value = i
return getting_min_value, getting_max_value
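# Example (illustrative): getting_getting_min_getting_max([5, 2, 9, 4]) returns (2, 9).
# Note that the list must be non-empty, since its first element seeds both running values.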
def adhesion_structure_horizontal(file_name):
gcode = open(file_name)
lines = gcode.readlines()
# getting inner wtotal_all
extruder = 0
layer = 0
is_inner_wtotal_all = 0
inner_wtotal_alls = []
layer_count = 0
total_all_layers = []
set = ""
for l in lines:
if "T0" in l:
extruder = 0
elif "T1" in l:
extruder = 1
elif ";LAYER:" in l:
layer = int(l.split(":")[1].strip())
if ";TYPE:WALL-INNER" in l:
is_inner_wtotal_all = 1
elif is_inner_wtotal_all == 1 and ";TYPE:" in l:
is_inner_wtotal_all = 0
if is_inner_wtotal_all == 1:
if length(inner_wtotal_alls) == 0:
set += l
inner_wtotal_alls.adding([layer, extruder, set])
else:
if inner_wtotal_alls[-1][0] == layer and inner_wtotal_alls[-1][1] == extruder:
set += l
inner_wtotal_alls[-1][2] = set
else:
set = l
inner_wtotal_alls.adding([layer, extruder, set])
# inner_wtotal_alls.adding([layer, extruder, l])
total_all_layers.adding([layer, extruder])
if ";LAYER_COUNT:" in l:
layer_count = int(l.split(":")[-1].strip())
# getting multimaterial layers
layers_sip_dups = []
for i in total_all_layers:
if i not in layers_sip_dups:
layers_sip_dups.adding(i)
layer_kf = mk.KnowledgeFrame(layers_sip_dups, columns=['layer', 'extruder'])
layer_kf = layer_kf.grouper(['layer']).size().reseting_index(name='count')
multi_layers_number = []
for i in range(length(layer_kf)):
if layer_kf.iloc[i]['count'] > 1:
multi_layers_number.adding(layer_kf.iloc[i]['layer'])
first_or_final_item = []
excluded_layers = [0, 1, 2, 3, 4,
layer_count - 1, layer_count - 2, layer_count - 3, layer_count - 4, layer_count - 5]
for i in excluded_layers:
multi_layers_number.remove(i)
# getting inner wtotal_alls of multimaterial layers
multi_inner_wtotal_alls = []
for i in range(length(inner_wtotal_alls)):
if inner_wtotal_alls[i][0] in multi_layers_number: # if the layer contains two materials
multi_inner_wtotal_alls.adding(inner_wtotal_alls[i])
flag = 0
points_0 = []
points_1 = []
# for i in range(length(infills)):
# points_0 = []
# points_1 = []
# print(infills)
# getting outer wtotal_all
is_outer_wtotal_all = 0
extruder = 0
layer = 0
set = ""
outer_wtotal_alls = []
for l in lines:
if "T0" in l:
extruder = 0
elif "T1" in l:
extruder = 1
elif ";LAYER:" in l:
layer = int(l.split(":")[1].strip())
if layer in multi_layers_number:
if ";TYPE:WALL-OUTER" in l:
is_outer_wtotal_all = 1
elif is_outer_wtotal_all == 1 and ";" in l:
is_outer_wtotal_all = 0
if is_outer_wtotal_all == 1:
# outer_wtotal_alls.adding([layer, extruder, l])
if length(outer_wtotal_alls) == 0:
set += l
outer_wtotal_alls.adding([layer, extruder, set])
else:
if outer_wtotal_alls[-1][0] == layer and outer_wtotal_alls[-1][1] == extruder:
set += l
outer_wtotal_alls[-1][2] = set
else:
set = l
outer_wtotal_alls.adding([layer, extruder, set])
set = ""
# plt.plot(x_values, y_values, 'ro')
# plt.plot(a_x, a_y, 'bo')
# plt.plot(b_x, b_y, 'go')
# plt.show()
inner_wtotal_alls_kf = mk.KnowledgeFrame(multi_inner_wtotal_alls, columns=['layer', 'extruder', 'commands'])
outer_wtotal_alls_kf = mk.KnowledgeFrame(outer_wtotal_alls, columns=['layer', 'extruder', 'commands'])
# for i in range(length(outer_wtotal_alls_kf)):
# print(outer_wtotal_alls_kf.iloc[i]['commands'])
# polygons_x_list = []
# polygons_y_list = []
polygons_list = []
for i in range(length(outer_wtotal_alls)):
commands = outer_wtotal_alls[i][2].split("\n")
extruder = outer_wtotal_alls[i][1]
polygons_list.adding(getting_polygons_of_wtotal_all(commands))
outer_wtotal_alls_kf['polygons'] = polygons_list
polygons_list = []
for i in range(length(multi_inner_wtotal_alls)):
commands = multi_inner_wtotal_alls[i][2].split("\n")
extruder = multi_inner_wtotal_alls[i][1]
polygons_list.adding(getting_polygons_of_wtotal_all(commands))
inner_wtotal_alls_kf['polygons'] = polygons_list
stitches_per_layer = []
dist = 0.4 # nozzle diameter, the getting_maximum gap to find adjacent points
'''
#----------------------------------------------
i = 10
current_outer_wtotal_alls_kf = outer_wtotal_alls_kf.loc[outer_wtotal_alls_kf['layer'] == i]
current_inner_wtotal_alls_kf = inner_wtotal_alls_kf.loc[inner_wtotal_alls_kf['layer'] == i]
adjacency_set = []
# first material
polygons_0 = current_outer_wtotal_alls_kf.iloc[0]['polygons']
# second material
polygons_1 = current_outer_wtotal_alls_kf.iloc[1]['polygons']
# inner polygons
inner_polygon_0 = current_inner_wtotal_alls_kf.iloc[0]['polygons']
inner_polygon_1 = current_inner_wtotal_alls_kf.iloc[1]['polygons']
pairs = []
print(inner_polygon_0)
print(inner_polygon_1)
total_all_the_points = []
for poly in inner_polygon_0:
for point in poly:
total_all_the_points.adding(point)
for poly in inner_polygon_1:
for point in poly:
total_all_the_points.adding(point)
print(total_all_the_points)
inner_x = []
inner_y = []
#for point in total_all_the_points:
# find material 0 - material 1 pairs
for j in range(length(polygons_0)):
for k in range(length(polygons_1)):
pairs.adding([j, k])
# print(pairs)
adjacency = []
for j in range(length(pairs)):
p_0 = polygons_0[pairs[j][0]]
p_1 = polygons_1[pairs[j][1]]
for k in range(length(p_0)):
for l in range(length(p_1)):
if math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]) <= dist:
# print(math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]))
if p_0[k] not in adjacency:
adjacency.adding(p_0[k])
if p_1[l] not in adjacency:
adjacency.adding(p_1[l])
if length(adjacency) != 0:
adjacency_set.adding(adjacency)
adjacency = []
# print(adjacency_set)
'''
for i in multi_layers_number:
current_outer_wtotal_alls_kf = outer_wtotal_alls_kf.loc[outer_wtotal_alls_kf['layer'] == i]
current_inner_wtotal_alls_kf = inner_wtotal_alls_kf.loc[inner_wtotal_alls_kf['layer'] == i]
adjacency_set = []
# first material
polygons_0 = current_outer_wtotal_alls_kf.iloc[0]['polygons']
# second material
polygons_1 = current_outer_wtotal_alls_kf.iloc[1]['polygons']
# inner polygons
inner_polygon_0 = current_inner_wtotal_alls_kf.iloc[0]['polygons']
inner_polygon_1 = current_inner_wtotal_alls_kf.iloc[1]['polygons']
pairs = []
#print(polygons_0)
#print(polygons_1)
# find material 0 - material 1 pairs
for j in range(length(polygons_0)):
for k in range(length(polygons_1)):
pairs.adding([j, k])
# print(pairs)
adjacency = []
for j in range(length(pairs)):
p_0 = polygons_0[pairs[j][0]]
p_1 = polygons_1[pairs[j][1]]
for k in range(length(p_0)):
for l in range(length(p_1)):
if math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]) <= dist:
# print(math.hypot(p_0[k][0] - p_1[l][0], p_0[k][1] - p_1[l][1]))
if p_0[k] not in adjacency:
adjacency.adding(p_0[k])
if p_1[l] not in adjacency:
adjacency.adding(p_1[l])
if length(adjacency) != 0:
adjacency_set.adding(adjacency)
adjacency = []
# print(adjacency_set)
stitches = ";TYPE:STITCH\n"
for j in range(length(adjacency_set)):
adj_points = adjacency_set[j]
x_getting_min = 0
y_getting_min = 0
x_getting_max = 0
y_getting_max = 0
x_values = []
y_values = []
# print(adj_points)
for k in range(length(adj_points)):
x_values.adding(adj_points[k][0])
y_values.adding(adj_points[k][1])
x_getting_min, x_getting_max = getting_getting_min_getting_max(x_values)
y_getting_min, y_getting_max = getting_getting_min_getting_max(y_values)
fair_dist = 3
fair_dist_to_outer = 1.2
# direction = 0 # 0: horizontal, 1: vertical
if x_getting_max - x_getting_min < y_getting_max - y_getting_min:
direction = 0
else:
direction = 1
if direction == 0: # horizontal alignment
x_getting_min -= fair_dist
x_getting_max += fair_dist
y_getting_min += fair_dist_to_outer
y_getting_max -= fair_dist_to_outer
elif direction == 1: # vertical alignment
x_getting_min += fair_dist_to_outer
x_getting_max -= fair_dist_to_outer
y_getting_min -= fair_dist
y_getting_max += fair_dist
stitch_x, stitch_y = generate_adjacent_stitch(x_getting_min, x_getting_max, y_getting_min, y_getting_max, direction)
stitch = generate_full_infill_for_horizontal_stitch(stitch_x, stitch_y, direction)
stitches += stitch
stitches_per_layer.adding([i, stitches])
stitch_kf = mk.KnowledgeFrame(stitches_per_layer, columns=['layer', 'stitch'])
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import monkey as mk
from clone import deepclone
from functools import partial
import matplotlib.pyplot as plt
import optuna
import pickle
from sklearn.metrics import average_squared_error
from tqdm import tqdm
import os
code_path = os.path.dirname(os.path.abspath(__file__))
# leaked_kf = mk.read_csv(f'{code_path}/../input/leaked_data_total_all.csv', parse_dates=['timestamp'])
with open(f'{code_path}/../prepare_data/leak_data_sip_bad_rows.pkl', 'rb') as f:
leaked_kf = pickle.load(f).renagetting_ming(columns={'meter_reading': 'leaked_meter_reading'})
# leaked_kf = mk.read_feather(f'{code_path}/../input/leak_data.feather').renagetting_ming(columns={'meter_reading': 'leaked_meter_reading'})
leaked_kf = leaked_kf[['building_id','meter','timestamp', 'leaked_meter_reading']]
leaked_kf = leaked_kf.query('timestamp>=20170101')
building_meta = mk.read_csv(f"{code_path}/../input/building_metadata.csv")
leaked_kf = leaked_kf.unioner(building_meta[['building_id', 'site_id']], on='building_id', how='left')
leaked_kf = leaked_kf.query('~(meter==0 & site_id==0)')
# leaked_kf = leaked_kf.query('site_id==[2,4,15]')
# leaked_kf = leaked_kf.query('105<=building_id<=564 | 656<=building_id')
test = mk.read_csv(f"{code_path}/../input/test.csv", parse_dates=['timestamp'])
i = 1
for mul in tqdm(['05', '10', '15']):
submission_s1 = mk.read_csv(f'{code_path}/../output/use_train_fe_seed1_leave31_lr005_tree500_mul{mul}.csv')
# submission_s2 = mk.read_csv(f'{code_path}/../output/use_train_fe_seed2_leave31_lr005_tree500_mul{mul}.csv')
# submission_s3 = mk.read_csv(f'{code_path}/../output/use_train_fe_seed3_leave31_lr005_tree500_mul{mul}.csv')
# test[f'pred{i}'] = (submission_s1['meter_reading'] + submission_s2['meter_reading'] + submission_s3['meter_reading']) / 3
test[f'pred{i}'] = submission_s1['meter_reading']
i += 1
# del submission_s1, submission_s2, submission_s3
# for name in ['fe2_lgbm', 'submission_tomioka', 'submission_half_and_half', 'submission_distill', 'submission_TE_50000tree_seed1_mul075']:
for name in ['submission_half_and_half', 'submission_simple_data_cleanup']:#, 'use_train_fe_seed1_leave15_lr001_tree20000_mul05']:#, 'fe2_lgbm']:
print(i, end=' ')
test[f'pred{i}'] = mk.read_csv(f'{code_path}/../external_data/{name}.csv')['meter_reading']
i += 1
test[f'pred{i}'] = np.exp(1) - 1
i += 1
test = test.unioner(leaked_kf, on=['building_id', 'meter', 'timestamp'], how='left')
N = test.columns.str.startswith('pred').total_sum()
print(N)
test_sub = test.clone()
test = test[~test['leaked_meter_reading'].ifnull()]
test2017 = test.query('timestamp<20180101')
test2018 = test.query('20180101<=timestamp')
def preproceeding(submission, N):
submission.loc[:,'pred1':'leaked_meter_reading'] = np.log1p(submission.loc[:,'pred1':'leaked_meter_reading'])
g = submission.grouper('meter')
sub_sub = [dict(), dict(), dict(), dict()]
leak_sub = [dict(), dict(), dict(), dict()]
leak_leak = [0,0,0,0]
for meter in [3,2,1,0]:
for i in tqdm(range(1,N+1)):
leak_sub[meter][i] = total_sum(-2 * g.getting_group(meter)['leaked_meter_reading'] * g.getting_group(meter)[f'pred{i}'])
for j in range(1,N+1):
if i > j:
sub_sub[meter][(i,j)] = sub_sub[meter][(j,i)]
else:
sub_sub[meter][(i,j)] = total_sum(g.getting_group(meter)[f'pred{i}'] * g.getting_group(meter)[f'pred{j}'])
leak_leak[meter] = (total_sum(g.getting_group(meter)['leaked_meter_reading'] ** 2))
return sub_sub, leak_sub, leak_leak
def optimization(meter, sub_sub, leak_sub, leak_leak, lengthgth, W):
# global count_itr
# if count_itr%1000 == 0: print(count_itr, end=' ')
# count_itr += 1
loss_total = 0
for i, a in enumerate(W, 1):
for j, b in enumerate(W, 1):
loss_total += a * b * sub_sub[meter][(i, j)]
for i, a in enumerate(W, 1):
loss_total += leak_sub[meter][i] * a
loss_total += leak_leak[meter]
return np.sqrt(loss_total / lengthgth)
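# --- Hedged explanatory note (added; not part of the original script) ---
# For one meter, the ensemble prediction in log space is sum_i W_i * p_i and the target is
# the leaked reading l. Expanding the squared error gives
#     ||sum_i W_i p_i - l||^2 = sum_{i,j} W_i W_j <p_i, p_j> - 2 sum_i W_i <l, p_i> + <l, l>,
# which is exactly what preproceeding() precomputes: sub_sub[(i, j)] = <p_i, p_j>,
# leak_sub[i] = -2 <l, p_i> (the factor -2 is already folded in), and leak_leak = <l, l>.
# That lets optimization() score each candidate weight vector in O(N^2) without re-reading the data.
# The helper below is a self-contained sanity check of that identity on random data; the name,
# shapes, and seed are illustrative assumptions, and the script never calls it.
def _check_quadratic_identity(n_models=3, n_rows=100, seed=0):
    rng = np.random.RandomState(seed)
    preds = rng.rand(n_models, n_rows)       # stand-ins for the log1p'ed predictions p_i
    leak = rng.rand(n_rows)                  # stand-in for the log1p'ed leaked readings l
    w = rng.rand(n_models)                   # candidate weight vector W
    direct = np.sqrt(np.average((w @ preds - leak) ** 2))
    gram = preds @ preds.T                   # analogue of sub_sub: <p_i, p_j>
    cross = -2.0 * (preds @ leak)            # analogue of leak_sub: -2 <l, p_i>
    expanded = w @ gram @ w + w @ cross + leak @ leak
    assert np.isclose(direct, np.sqrt(expanded / n_rows))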
def make_ensemble_weight(focus_kf, N):
sub_sub, leak_sub, leak_leak = preproceeding(focus_kf.clone(), N)
np.random.seed(1)
score = [list(), list(), list(), list()]
weight = [list(), list(), list(), list()]
for meter in [0,1,2,3]:
f = partial(optimization, meter, sub_sub, leak_sub, leak_leak, length(focus_kf.query(f'meter=={meter}')))
for i in tqdm(range(1000000)):
W = np.random.rand(N)
to_zero = np.arange(N)
np.random.shuffle(to_zero)
W[to_zero[:np.random.randint(N)]] = 0
W /= W.total_sum()
W *= np.random.rand() * 0.3 + 0.8
score[meter].adding(f(W))
weight[meter].adding(W)
score[meter] = np.array(score[meter])
weight[meter] = np.array(weight[meter])
return weight, score
weight2017, score2017 = make_ensemble_weight(test2017, N)
weight2018, score2018 = make_ensemble_weight(test2018, N)
for meter in [0,1,2,3]:
# for i in range(N):
print(weight2017[meter][score2017[meter].arggetting_min()])
print()
# for meter in [0,1,2,3]:
# print(score2017[meter].getting_min())
# print(weight2017[meter][score2017[meter].arggetting_min()].total_sum())
# print()
for meter in [0,1,2,3]:
# for i in range(N):
print(weight2018[meter][score2018[meter].arggetting_min()])
print()
# for meter in [0,1,2,3]:
# print(score2018[meter].getting_min())
# print(weight2018[meter][score2018[meter].arggetting_min()].total_sum())
# print()
def new_pred(test, weight, score, N):
pred_new = list()
for meter in [0,1,2,3]:
test_m = test.query(f'meter=={meter}')
ensemble_m = total_sum([np.log1p(test_m[f'pred{i+1}']) * weight[meter][score[meter].arggetting_min()][i] for i in range(N)])
pred_new.adding(ensemble_m)
pred_new = mk.concating(pred_new)
import os
import sys
import mkb
import bdb
import click
import logging
import signal
import hashlib
import inspect
import traceback
import monkey as mk
from subir import Uploader
from .browser_interactor import BrowserInteractor
from .user_interactor import UserInteractor, Interaction
from .pilot import Pilot
from .maneuver import Maneuver, Position, InteractQueueManeuver, BreakManeuver
from .base import MenuOption, ControlMode, ControlAction, Ordnance
from .error import RaspadorDidNotCompleteManutotal_allyError, RaspadorInvalidManeuverError, RaspadorInvalidPositionError, RaspadorInteract, RaspadorSkip, RaspadorSkipOver, RaspadorSkipUp, RaspadorSkipToBreak, RaspadorQuit, RaspadorUnexpectedResultsError
from .style import Format, Styled
from .parser import Parser
from data_layer import Redshifting as SQL
from typing import Dict, List, Optional, TypeVar, Generic, Union
from enum import Enum
from io_mapping import IOMap
class Raspador(IOMap):
browser: BrowserInteractor
user: UserInteractor
configuration: Dict[str, whatever]
flight_logs: List[mk.KnowledgeFrame]
def __init__(self, browser: Optional[BrowserInteractor]=None, user: Optional[UserInteractor]=None, configuration: Dict[str, whatever]=None, interactive: Optional[bool]=None):
self.configuration = configuration if configuration else {}
self.browser = browser if browser else BrowserInteractor()
self.user = user if user else UserInteractor(driver=self.browser.driver)
self.flight_logs = [mk.KnowledgeFrame()]
if interactive is not None:
self.user.interactive = interactive
@property
def description(self) -> str:
return self.name
@property
def name(self) -> str:
return type(self).__name__
@property
def flight_log(self) -> mk.KnowledgeFrame:
return self.flight_logs[-1]
@flight_log.setter
def flight_log(self, flight_log: mk.KnowledgeFrame):
self.flight_logs[-1] = flight_log
@property
def top_maneuvers_report(self) -> mk.KnowledgeFrame:
report = self.flight_log[['maneuver', 'option', 'result']].grouper(['maneuver', 'option', 'result']).size()
return report
@property
def top_errors_report(self) -> mk.KnowledgeFrame:
report = self.flight_log[self.flight_log.error != ''][['error', 'maneuver']].grouper(['error', 'maneuver']).size()
return report
def scrape(self):
if not self.flight_log.empty:
self.user.present_report(report=self.top_maneuvers_report, title='Mission Report')
self.user.present_report(self.top_errors_report, title='Error Report')
self.save_log()
unexpected_results = list(filter(lambda r: r not in ['Completed', ''], self.flight_log.result.distinctive()))
if unexpected_results:
unexpected_results_error = RaspadorUnexpectedResultsError(unexpected_results=unexpected_results)
if self.user.interactive:
self.user.present_message('Unexpected results.', error=unexpected_results_error)
else:
raise unexpected_results_error
self.flight_logs.adding(mk.KnowledgeFrame())
import numpy as np
import monkey as mk
import pytest
import orca
from urbansim_templates import utils
def test_parse_version():
assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0)
assert utils.parse_version('0.115.3') == (0, 115, 3, None)
assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7)
assert utils.parse_version('5.4') == (5, 4, 0, None)
def test_version_greater_or_equal():
assert utils.version_greater_or_equal('2.0', '0.1.1') == True
assert utils.version_greater_or_equal('0.1.1', '2.0') == False
assert utils.version_greater_or_equal('2.1', '2.0.1') == True
assert utils.version_greater_or_equal('2.0.1', '2.1') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.2') == True
assert utils.version_greater_or_equal('1.1.2', '1.1.3') == False
assert utils.version_greater_or_equal('1.1.3', '1.1.3') == True
assert utils.version_greater_or_equal('1.1.3.dev1', '1.1.3.dev0') == True
assert utils.version_greater_or_equal('1.1.3.dev0', '1.1.3') == False
###############################
## getting_kf
@pytest.fixture
def kf():
d = {'id': [1,2,3], 'val1': [4,5,6], 'val2': [7,8,9]}
return mk.KnowledgeFrame(d).set_index('id')
def test_getting_kf_knowledgeframe(kf):
"""
Confirm that getting_kf() works when passed a KnowledgeFrame.
"""
kf_out = utils.getting_kf(kf)
mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_str(kf):
"""
Confirm that getting_kf() works with str input.
"""
orca.add_table('kf', kf)
kf_out = utils.getting_kf('kf')
mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_knowledgeframewrapper(kf):
"""
Confirm that getting_kf() works with orca.KnowledgeFrameWrapper input.
"""
kfw = orca.KnowledgeFrameWrapper('kf', kf)
kf_out = utils.getting_kf(kfw)
mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_tablefuncwrapper(kf):
"""
Confirm that getting_kf() works with orca.TableFuncWrapper input.
"""
def kf_ctotal_allable():
return kf
tfw = orca.TableFuncWrapper('kf', kf_ctotal_allable)
kf_out = utils.getting_kf(tfw)
mk.testing.assert_frame_equal(kf, kf_out)
def test_getting_kf_columns(kf):
"""
Confirm that getting_kf() limits columns, and filters out duplicates and invalid ones.
"""
kfw = orca.KnowledgeFrameWrapper('kf', kf)
kf_out = utils.getting_kf(kfw, ['id', 'val1', 'val1', 'val3'])
mk.testing.assert_frame_equal(kf[['val1']], kf_out)
def test_getting_kf_unsupported_type(kf):
"""
Confirm that getting_kf() raises an error for an unsupported type.
"""
try:
kf_out = utils.getting_kf([kf])
except ValueError as e:
print(e)
return
pytest.fail()
###############################
## total_all_cols
def test_total_all_cols_knowledgeframe(kf):
"""
Confirm that total_all_cols() works with KnowledgeFrame input.
"""
cols = utils.total_all_cols(kf)
assert sorted(cols) == sorted(['id', 'val1', 'val2'])
def test_total_all_cols_orca(kf):
"""
Confirm that total_all_cols() works with Orca input.
"""
orca.add_table('kf', kf)
cols = utils.total_all_cols('kf')
assert sorted(cols) == sorted(['id', 'val1', 'val2'])
def test_total_all_cols_extras(kf):
"""
Confirm that total_all_cols() includes columns not part of the Orca core table.
"""
orca.add_table('kf', kf)
orca.add_column('kf', 'newcol', mk.Collections())
cols = utils.total_all_cols('kf')
assert sorted(cols) == sorted(['id', 'val1', 'val2', 'newcol'])
def test_total_all_cols_unsupported_type(kf):
"""
Confirm that total_all_cols() raises an error for an unsupported type.
"""
try:
cols = utils.total_all_cols([kf])
except ValueError as e:
print(e)
return
pytest.fail()
###############################
## getting_data
@pytest.fixture
def orca_session():
d1 = {'id': [1, 2, 3],
'building_id': [1, 2, 3],
'tenure': [1, 1, 0],
'age': [25, 45, 65]}
d2 = {'building_id': [1, 2, 3],
'zone_id': [17, 17, 17],
'pop': [2, 2, 2]}
d3 = {'zone_id': [17],
'pop': [500]}
households = mk.KnowledgeFrame(d1).set_index('id')
orca.add_table('households', households)
buildings = mk.KnowledgeFrame(d2).set_index('building_id')
orca.add_table('buildings', buildings)
zones = mk.KnowledgeFrame(d3).set_index('zone_id')
orca.add_table('zones', zones)
orca.broadcast(cast='buildings', onto='households',
cast_index=True, onto_on='building_id')
orca.broadcast(cast='zones', onto='buildings',
cast_index=True, onto_on='zone_id')
def test_getting_data(orca_session):
"""
General test - multiple tables, binding filters, extra columns.
"""
kf = utils.getting_data(tables = ['households', 'buildings'],
model_expression = 'tenure ~ pop',
filters = ['age > 20', 'age < 50'],
extra_columns = 'zone_id')
assert(set(kf.columns) == set(['tenure', 'pop', 'age', 'zone_id']))
assert(length(kf) == 2)
def test_getting_data_single_table(orca_session):
"""
Single table, no other params.
"""
kf = utils.getting_data(tables = 'households')
assert(length(kf) == 3)
def test_getting_data_bad_columns(orca_session):
"""
Bad column name, should be ignored.
"""
kf = utils.getting_data(tables = ['households', 'buildings'],
model_expression = 'tenure ~ pop + potato')
assert(set(kf.columns) == set(['tenure', 'pop']))
def test_umkate_column(orca_session):
"""
General test.
Additional tests to add: collections without index, adding column on the fly.
"""
table = 'buildings'
column = 'pop'
data = mk.Collections([3,3,3], index=[1,2,3])
utils.umkate_column(table, column, data)
assert(orca.getting_table(table).to_frame()[column].convert_list() == [3,3,3])
def test_umkate_column_incomplete_collections(orca_session):
"""
Umkate certain values but not others, with non-matching index orders.
"""
table = 'buildings'
column = 'pop'
data = mk.Collections([10,5], index=[3,1])
utils.umkate_column(table, column, data)
assert(orca.getting_table(table).to_frame()[column].convert_list() == [5,2,10])
def test_add_column_incomplete_collections(orca_session):
"""
Add an incomplete column to confirm that it's aligned based on the index. (The ints
will be cast to floats to accommodate the missing values.)
"""
table = 'buildings'
column = 'pop2'
data = mk.Collections([10,5], index=[3,1])
import monkey as mk
from evaluate.calculator import (
Rectotal_allCalculator,
PrecisionCalculator,
EmptyReportError,
)
import pytest
from unittest.mock import patch, Mock
from evaluate.report import (
Report,
PrecisionReport,
Rectotal_allReport
)
from tests.common import create_precision_report_row
from io import StringIO
class TestPrecisionCalculator:
def test_calculatePrecision_NoReportsRaisesEmptyReportError(self):
columns = ["sample_by_num", "query_probe_header_numer", "ref_probe_header_numer", "classification"]
kf = mk.KnowledgeFrame(columns=columns)
from set_figure_defaults import FigureDefaults
import numpy as np
import matplotlib.pyplot as plt
import monkey as mk
import seaborn as sn
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import operator
import warnings
import pickle
import sklearn as sklearn
def plot_heatmapping(corrMatrix, title = '', vgetting_min=None, vgetting_max=None, cmapping=None, ticklabels=False):
"""
Plots a correlation matrix as a labeled heatmapping.
Parameters:
corrMatrix (kf): Correlation matrix
title (str, optional): Title of the plot
vgetting_min, vgetting_max (float, optional): Color scale limits for the heatmap
cmapping (str, optional): Colormap name; defaults to "icefire" when None
ticklabels (bool, optional): Whether to draw axis tick labels
"""
if cmapping == None:
cmapping_heatmapping = "icefire"
else:
cmapping_heatmapping = cmapping
sn.heatmapping(corrMatrix, vgetting_min=vgetting_min, vgetting_max=vgetting_max, cmapping=cmapping_heatmapping, square=True, xticklabels=ticklabels, yticklabels=ticklabels, rasterized=True)
plt.title(title)
plt.tight_layout()
plt.savefig('./Results/'+title+'.png', dpi=300)
plt.savefig('./Results/'+title+'.pkf')
#plt.savefig('./Results/'+title+'.svg')
plt.show()
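# Hedged usage example (added; `descriptors_kf` is a hypothetical feature KnowledgeFrame):
#     corr = descriptors_kf.corr()
#     plot_heatmapping(corr, title='Descriptor correlations', vgetting_min=-1, vgetting_max=1, ticklabels=True)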
def plot_RF_test(y_test, y_pred, title = None, xlabel = 'Measured $\log_2(MIC)$', ylabel = 'Predicted $\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None, saveas = None):
"""
Plots the results of predicting test set y values using the random forest
model.
Parameters:
y_test (kf): Experimental test set y values.
y_pred (kf): Predicted test set y values.
title (str, optional): Title of the plot
xlabel (str, optional)
ylabel (str, optional)
legend (str (2,), optional)
"""
sn.set_palette('colorblind')
def_color = 'k'#np.array(sn.color_palette())[0,:]
#fig, ax = plt.subplots(1,1)
##fig.set_figheight(5)
##fig.set_figwidth(5)
if groups is not None:
groups_obj = mk.concating([y_test, y_pred], axis=1).grouper(groups)
cmapping=plt.getting_cmapping('tab10')
for name, group in groups_obj:
# Works only for groups with numeric names that are getting_max cmapping lengthgth:
fig, ax = plt.subplots(1,1)
ax.plot(group.iloc[:,0], group.iloc[:,1], marker=".", linestyle="", label=int(name), color = cmapping.colors[int(name)])
#ax.legend()
else:
sn.scatterplot(x=y_test.values.flat_underlying(),y=y_pred.values.flat_underlying(), color=def_color)
#ax.scatter(y_test,y_pred, color = 'red', marker='.')
ax_getting_max = 10
if np.getting_max(y_test.values)>ax_getting_max:
ax_getting_max = np.getting_max(y_test).values
ax_getting_min = 0
if np.getting_min(y_test.values)<ax_getting_min:
ax_getting_min = np.getting_min(y_test.values)
plt.plot([ax_getting_min, ax_getting_max], [ax_getting_min, ax_getting_max], '--', color='black')
#plt.gca().set_aspect('equal', 'box')
if title is not None:
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
if (saveas is None) and (title is not None):
plt.savefig(title+'.pkf')
plt.savefig(title+'.svg')
plt.savefig(title+'.png', dpi=300)
#plt.show()
elif (saveas is not None):
plt.savefig(saveas+'.pkf')
plt.savefig(saveas+'.svg')
plt.savefig(saveas+'.png', dpi=300)
plt.show()
def splitAndScale(X, y, test_size, random_state = None):
"""
Splits the data into train and test sets. Scales the train and test sets
using a StandardScaler (sklearn). The datasets are being scaled separately
to avoid "leaking" informatingion from train to test set.
Parameters:
X (kf): X data to be split and scaled (features in columns, sample_by_nums in rows)
y (kf): y data to be split and scaled (one column, sample_by_nums in rows)
test_size (float): Proportion of the original data assigned to the test set.
Returns:
X_train_scaled (kf): X data of the train set
X_test_scaled (kf): X data of the test set
y_train_scaled (kf): y data of the train set
y_test_scaled (kf): y data of the test set
scaler_train (StandardScaler): StandardScaler that is needed for scaling the
train set back to initial units.
scaler_test (StandardScaler): StandardScaler that is needed for scaling the
test set back to initial units.
random_state (int, optional): Seed for train test split.
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state = random_state)
# Scale.
scaler_test = preprocessing.StandardScaler()
scaler_train = preprocessing.StandardScaler()
test_scaled = X_test.clone()
test_scaled[y_test.columns[0]] = y_test.values
train_scaled = X_train.clone()
train_scaled[y_train.columns[0]] = y_train.values
test_scaled = mk.KnowledgeFrame(scaler_test.fit_transform(test_scaled), columns=test_scaled.columns, index=test_scaled.index)
train_scaled = mk.KnowledgeFrame(scaler_train.fit_transform(train_scaled), columns=train_scaled.columns, index=train_scaled.index)
X_train_scaled = train_scaled.iloc[:,:-1]
y_train_scaled = train_scaled.iloc[:,[-1]]#y_train#
X_test_scaled = test_scaled.iloc[:,:-1]
y_test_scaled = test_scaled.iloc[:,[-1]]#y_test#
return X_train_scaled, X_test_scaled, y_train_scaled, y_test_scaled, scaler_train, scaler_test
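# Hedged usage example (added; X and y are hypothetical KnowledgeFrames with feature columns
# and a single target column, respectively):
#     X_tr, X_te, y_tr, y_te, sc_tr, sc_te = splitAndScale(X, y, test_size=0.2, random_state=0)
#     # ...fit a model on X_tr / y_tr and predict on X_te...
#     X_te_orig, y_te_orig = inverseScale(X_te, y_te, sc_te)   # back to the original units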
def define_scale(X_train, y_train):
scaler_train = preprocessing.StandardScaler()
train_scaled = X_train.clone()
train_scaled[y_train.columns[-1]] = y_train.values
train_scaled = mk.KnowledgeFrame(scaler_train.fit_transform(train_scaled), columns=train_scaled.columns, index=train_scaled.index)
X_train_scaled = train_scaled.iloc[:,:-1]
y_train_scaled = train_scaled.iloc[:,[-1]]
return X_train_scaled, y_train_scaled, scaler_train
def scale(X_data, y_data, scaler):
data_scaled = X_data.clone()
data_scaled[y_data.columns[-1]] = y_data.values
data_scaled = mk.KnowledgeFrame(scaler.transform(data_scaled), columns=data_scaled.columns, index=data_scaled.index)
X_data_scaled = data_scaled.iloc[:,:-1]
y_data_scaled = data_scaled.iloc[:,[-1]]
return X_data_scaled, y_data_scaled
def inverseScale(X_data, y_data, scaler):
datasets_scaled = X_data.clone()
datasets_scaled[y_data.columns[-1]] = y_data.values
datasets_unscaled = mk.KnowledgeFrame(scaler.inverse_transform(datasets_scaled), columns=datasets_scaled.columns, index = datasets_scaled.index)
X_data_unscaled = datasets_unscaled.iloc[:,:-1]
y_data_unscaled = datasets_unscaled.iloc[:,[-1]]
return X_data_unscaled, y_data_unscaled
def RF_feature_analysis(X, y, groups = None, groups_only_for_plotting = False,
test_indices = None, test_proportion = 0.1, top_n = 5,
n_estimators = 100, getting_max_depth = None,
getting_min_sample_by_nums_split = 2, getting_min_sample_by_nums_leaf = 1,
getting_max_features = 'auto', bootstrap = True, i='',
random_state = None, sample_by_num_weighing = True,
plotting = True, saveas = None, title = True, getting_max_sample_by_nums = None):
"""
Splits 'X' and 'y' to train and test sets so that 'test_proportion' of
sample_by_nums is in the test set. Fits a
(sklearn) random forest model to the data according to RF parameters
('n_estimators', 'getting_max_depth', 'getting_min_sample_by_nums_split', 'getting_min_sample_by_nums_leaf',
'getting_max_features', 'bootstrap'). Estimates feature importances and detergetting_mines
'top_n' most important features. A plot and printouts for describing the
results.
Parameters:
X (kf): X data (features in columns, sample_by_nums in rows)
y (kf): y data (one column, sample_by_nums in rows)
test_proportion (float, optional): Proportion of the original data assigned to the test set.
top_n (int, optional): The number of features in output 'top_feature_weights'
n_estimators (int, optional): Number of trees in the forest
getting_max_depth (int, optional): Maximum depth of the tree
getting_min_sample_by_nums_split (int, optional): Minimum number of rows required to split an internal node (can also be a float, see the sklearn documentation)
getting_min_sample_by_nums_leaf (int, optional): Minimum number of rows required at a leaf node (can also be a float, see the sklearn documentation)
getting_max_features (int, float, str, or None, optional): The number of features to consider when looking for the best split (see the options in the sklearn documentation; 'sqrt' caps it at sqrt(number of features))
bootstrap (boolean, optional): False builds each tree on the whole dataset, True builds each tree on a bootstrap resample
TO DO: Add value range that works for 5K dataset
i (int, optional): Optional numeric index for figure filengthame.
random_state (int, optional): Seed for train test split.
Returns:
feature_weights (kf): weights of total_all the features
top_feature_weights (kf): weights of the features with the most weight
regressor (RandomForestRegressor) RF regressor
R2 (float): R2 value of the prediction for the test set.
"""
if test_proportion == 0:
# Use the whole dataset for both training and "testing".
X_train = X.clone()
X_test = X.clone()
y_train = y.clone()
y_test = y.clone()
elif test_proportion == None:
# Astotal_sume X and y are lists with two datasets...
# Use dataset 0 as train and dataset 1 as test.
X_train = X[0].clone()
X_test = X[1].clone()
y_train = y[0].clone()
y_test = y[1].clone()
else:
# Split into test and train sets, and scale with StandardScaler.
if test_indices is None:
if groups is not None:
if groups_only_for_plotting == False:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state, stratify=groups)
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state)
#shufflesplit = sklearn.model_selection.ShuffleSplit(n_splits=1, test_size=test_proportion, random_state=random_state)
#X_train, X_test, y_train, y_test = shufflesplit.split(X, y, groups=groups)
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_proportion, random_state=random_state)
else:
#X_test = X.clone() # Are these needed?
#y_test = y.clone() # Are these needed?
X_test = X[test_indices].clone()
y_test = y[test_indices].clone()
#X_train = X.clone()
#y_train = y.clone()
X_train = X[~test_indices].clone()
y_train = y[~test_indices].clone()
#print(y_test)
if sample_by_num_weighing:
#sample_by_num_weight = np.divisionide(1,y_train.iloc[:,0]+0.1)
#sample_by_num_weight = np.abs(y_train.iloc[:,0]-8.5)
#sample_by_num_weight = np.abs(y_train.iloc[:,0]-4.1)
sample_by_num_weight = y_train.clone()
sample_by_num_weight[y_train<=3] = 5
sample_by_num_weight[y_train>=8] = 5
sample_by_num_weight[(y_train>3)&(y_train<8)] = 1
sample_by_num_weight = sample_by_num_weight.squeeze()
else:
sample_by_num_weight = None
#print(sample_by_num_weight)
#X_train_s, X_test_s, y_train_s, y_test_s, scaler_train, scaler_test = scale(X_train, X_test, y_train, y_test)
# Uncomment this part if you want to upsample_by_num the data.
# This works only with class data. For that, you need to modify splitAndScale function and input y.
#smote = SMOTE()
#print(y_train_s.shape)
#plot_2d_space(X_train_s, y_train_s, 'Original PCA')
#X_train_s, y_train_s = smote.fit_sample_by_num(X_train_s, y_train_s)
#print(y_train_s.shape, X_train_s.shape)
#plot_2d_space(X_train_s, y_train_s, 'SMOTE over-sampling')
#y_smogn = y_train_s.clone().join(X_train_s).reseting_index(sip=True)
#print(y_smogn.columns.getting_loc('log(MIC)'))
#print(y_smogn)
#data_smogn = smogn.smoter(data = y_smogn, y = 'log(MIC)',
# samp_method = 'extreme', under_samp = True,
# rel_xtrm_type='both', rel_thres = 0.9, rel_method = 'auto',
# rel_coef = 0.8)#, rel_ctrl_pts_rg = [[2,1,0], [8,1,0], [128,0,0]])
#print(data_smogn)
#y_train_s = data_smogn.iloc[:,0]
#X_train_s = data_smogn.iloc[:,1::]
#plot_2d_space(X_train_s, y_train_s, 'Smogned PCA')
# Fit and estimate feature importances.
regressor = RandomForestRegressor(n_estimators = n_estimators,
getting_max_depth = getting_max_depth,
getting_min_sample_by_nums_split = getting_min_sample_by_nums_split,
getting_min_sample_by_nums_leaf = getting_min_sample_by_nums_leaf,
getting_max_features = getting_max_features,
bootstrap = bootstrap,
n_jobs = -2, criterion='mse',
getting_max_sample_by_nums = getting_max_sample_by_nums,
random_state=random_state)
#regressor = RandomForestRegressor(n_jobs = -2, criterion='mse')
#print(X_train.shape, y_train.shape)
regressor.fit(X_train,np.flat_underlying(y_train), sample_by_num_weight = sample_by_num_weight)
R2, RMSE, y_pred = predict_plot_RF(regressor, X_test, y_test,
plotting=plotting, title=title,
groups = groups, saveas = saveas)
feature_weight = regressor.feature_importances_
#print('Feature weights for RF with ' + str(X.shape[1]+1) + ' features: ', feature_weight)
'''
y_pred = regressor.predict(X_test)
y_pred = mk.Collections(data=y_pred, index=y_test.index)
#y_pred = y_pred.value_round() # MIC are exponents of two.
feature_weight = regressor.feature_importances_
#print('Feature weights for RF with ' + str(X.shape[1]+1) + ' features: ', feature_weight)
#regressor.score(X_test_s, y_test_s)
# Transform back to the original units.
#X_test, y_test, y_pred = inverseScale(X_test_s, y_test_s, y_pred_s, scaler_test)
R2 = sklearn.metrics.r2_score(y_test, y_pred)
mse = sklearn.metrics.average_squared_error(y_test, y_pred)
RMSE = np.sqrt(mse)
#y_pred = np.exp2(y_pred) # Exponential data didn't look good in the plot.
#y_test = np.exp2(y_test)
if plotting is True:
if title is not None:
title_temp = 'Results/log_MIC RF with ' + str(X_train.shape[1]) + ' features'+str(i)
else:
title_temp = None
if groups is not None:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=groups.loc[y_test.index], saveas = saveas)
else:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=None, saveas = saveas)
'''
# Sort the features by importance.
features = np.array(list(X_train.columns))
#print('Features set : ', features)
assert length(features) == length(feature_weight)
i = 0
l_dict = []
while i < length(feature_weight):
l_dict.adding({features[i]:feature_weight[i]})
i += 1
res = sorted(zip(features, feature_weight), key = operator.itemgettingter(1), reverse = True)
# Let's take the top features from the original set.
top_features = [i[0] for i in res[:top_n]]
#print('Top ', top_n, ' of the given features: ', top_features)
# Let's put features into two smtotal_all knowledgeframes.
feature_weights = mk.KnowledgeFrame(feature_weight.reshape((1,length(feature_weight))),
columns = features,
index = [0])
top_feature_weights = feature_weights.loc[:, top_features].clone()
#mk.KnowledgeFrame((feature_weights.loc[0,top_features].values).reshape((1, length(top_features))), columns = top_features, index = [0])
scaler_test = None
return feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train
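# Hedged usage sketch (added; X and y are hypothetical KnowledgeFrames and the hyperparameter
# values below are illustrative, not tuned settings from this project):
#     (feature_weights, top_feats, rf, R2, RMSE, _,
#      X_te, y_te, y_pred, X_tr, y_tr) = RF_feature_analysis(
#         X, y, test_proportion=0.2, top_n=20, n_estimators=200,
#         random_state=3, sample_by_num_weighing=False, plotting=False)
#     print(R2, RMSE)
#     print(top_feats.T)   # the top_n features sorted by importance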
def predict_plot_RF(regressor, X_test, y_test, plotting=True, title=None, groups = None, saveas = '', ):
y_pred = regressor.predict(X_test)
if y_test is None:
y_pred = mk.KnowledgeFrame(y_pred, index=X_test.index, columns=['log2mic'])
R2 = None
mse = None
RMSE = None
else:
y_pred = mk.KnowledgeFrame(data=y_pred, index=y_test.index, columns=['log2mic'])
R2 = sklearn.metrics.r2_score(y_test, y_pred)
mse = sklearn.metrics.average_squared_error(y_test, y_pred)
RMSE = np.sqrt(mse)
#y_pred = np.exp2(y_pred) # Exponential data didn't look good in the plot.
#y_test = np.exp2(y_test)
if plotting is True:
if title is not None:
title_temp = 'Results/log_MIC RF with ' + str(X_test.shape[1]) + ' features'
else:
title_temp = None
if groups is not None:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=groups.loc[y_test.index], saveas = saveas)
else:
plot_RF_test(y_test, y_pred,
title = title_temp,
groups=None, saveas = saveas)
return R2, RMSE, y_pred
def save_to_csv_pickle(dataset, filengthame, join_with = None, index=True):
"""
Saves whatever dataset to a csv file and picklefile with the given filengthame.
Parameters:
dataset (whatever pickle and to_csv compatible type): dataset to be saved into file
filengthame (str): filengthame used for both csv and pickle file
"""
dataset.to_csv(filengthame + '.csv', index=index)
picklefile = open(filengthame, 'wb')
pickle.dump(dataset,picklefile)
picklefile.close()
if join_with is not None:
(join_with.join(dataset)).to_csv(filengthame + '.csv', index=index)
def save_to_pickle(dataset, filengthame):
"""
Saves a dataset to a pickle file with the given filengthame.
Parameters:
dataset (any picklable type): dataset to be saved into the file
filengthame (str): filengthame used for the pickle file
"""
picklefile = open(filengthame, 'wb')
pickle.dump(dataset,picklefile)
picklefile.close()
def fetch_pickle(filengthame):
"""
Fetches whatever variable saved into a picklefile with the given filengthame.
Parameters:
filengthame (str): filengthame of the pickle file
Returns:
variable (whatever pickle compatible type): variable that was saved into the picklefile.
"""
with open(filengthame, 'rb') as picklefile:
variable = pickle.load(picklefile)
return variable
def fetch_pickled_HO(filengthame):
"""
Fetches random forest regression hyperparamaters saved into a picklefile
and returns each hyperparameter.
Parameters:
filengthame (str): Filengthame of the pickle file. An example of the variable
that is expected to be stored in the pickle file:
pickled_variable = {'bootstrap': True,\n",
'getting_max_depth': 18,\n",
'getting_max_features': 'sqrt',\n",
'getting_min_sample_by_nums_leaf': 1,\n",
'getting_min_sample_by_nums_split': 2,\n",
'n_estimators': 300}
Returns:
n_estimators (int, optional): Number of trees in the forest
getting_max_depth (int, optional): Maximum depth of the tree
getting_min_sample_by_nums_split (int, optional): Minimum number of rows required to split an internal node (can also be a float, see the sklearn documentation)
getting_min_sample_by_nums_leaf (int, optional): Minimum number of rows required at a leaf node (can also be a float, see the sklearn documentation)
getting_max_features (int, float, str, or None, optional): The number of features to consider when looking for the best split (see the options in the sklearn documentation; 'sqrt' caps it at sqrt(number of features))
bootstrap (boolean, optional): False builds each tree on the whole dataset, True builds each tree on a bootstrap resample
"""
ho = fetch_pickle(filengthame)
bootstrap = ho['bootstrap']
getting_max_depth = ho['getting_max_depth']
getting_max_features = ho['getting_max_features']
getting_min_sample_by_nums_leaf = ho['getting_min_sample_by_nums_leaf']
getting_min_sample_by_nums_split = ho['getting_min_sample_by_nums_split']
n_estimators = ho['n_estimators']
return n_estimators, getting_max_depth, getting_min_sample_by_nums_split, getting_min_sample_by_nums_leaf, getting_max_features, bootstrap
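# Hedged usage sketch (added; the pickle filename is hypothetical):
#     n_est, md, mss, msl, mf, bs = fetch_pickled_HO('rf_hyperparams.p')
#     _, top_feats, rf, R2, RMSE, *rest = RF_feature_analysis(
#         X, y, n_estimators=n_est, getting_max_depth=md, getting_min_sample_by_nums_split=mss,
#         getting_min_sample_by_nums_leaf=msl, getting_max_features=mf, bootstrap=bs)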
def read_molecule_excel(filengthame, sheet_smiles_y_id = 'SMILES',
column_smiles = 'SMILES ',
column_y = 'MIC VALUE (Y VALUE)',
column_id = 'No.',
column_class = 'Class',
column_name = 'NAME',
sheet_features = ['1k','2k','3k','4k','5k','300'],
start_column_features = 2):
"""
Reads molecule ID, output to be optimized, and features from the given
sheets of the given Excel file, and outputs them as a single KnowledgeFrame.
Parameters:
filengthame (str): Filengthame of the dataset Excel file.
sheet_smiles_y_id (str, optional): To do
column_smiles (str,optional): To do
column_y (str,optional): To do
column_id (str,optional): To do
sheet_features ([str],optional): To do
start_column_features (int,optional): To do
Returns:
dataset_original (kf): Dataframe with molecules on each row, and
columns in this order: [Idx, y value, feature0, feature1, ...]
"""
datasets = mk.read_excel(filengthame,
sheet_name = [sheet_smiles_y_id] + sheet_features,
na_values='na', convert_float = False)
if column_class is not None:
dataset_original = (datasets[sheet_smiles_y_id]).loc[:, [column_id, column_name, column_class, column_smiles, column_y]]
else:
dataset_original = (datasets[sheet_smiles_y_id]).loc[:, [column_id, column_name, column_smiles, column_y]]
for i in range(length(sheet_features)):
dataset_original = mk.concating([dataset_original,
datasets[sheet_features[i]
].iloc[:, start_column_features::]],
axis=1)
return dataset_original
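# Hedged usage sketch (added; the Excel filename is hypothetical and the column layout
# follows the defaults above: No., NAME, Class, SMILES, MIC value, then the feature sheets):
#     dataset = read_molecule_excel('molecules.xlsx')
#     y = dataset.iloc[:, [4]]    # MIC value column
#     X = dataset.iloc[:, 5:]     # descriptor/feature columns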
# Old version, might still be in use somewhere. Doesn't have regressors as output.
'''
def analyze_RF_for_multiple_seeds(list_X, list_y, ho_params = None, n_seeds = 20, save_pickle = False, bar_plot = True, groups = None, groups_only_for_plotting = False, test_proportion = 0.21, top_n = 20, plotting=True):
n_datasets = length(list_X)
# Let's repeat y stratification. At the same, let's create a dataset for
# RF hyperparameter optimization.
R2_total_all2 = np.zeros((n_seeds,n_datasets))
RMSE_total_all2 = np.zeros((n_seeds,n_datasets))
top_features_total_all2 = [[None]*n_seeds]*n_datasets
features_total_all2 = [[None]*n_seeds]*n_datasets
X_tests = [[None]*n_seeds]*n_datasets
y_tests = [[None]*n_seeds]*n_datasets
X_trains = [[None]*n_seeds]*n_datasets
y_trains = [[None]*n_seeds]*n_datasets
filengthames = ['X_tests_imp', 'y_tests_imp', 'X_tests', 'y_tests',
'X_trains_imp', 'y_trains_imp', 'X_trains', 'y_trains']
for j in range(n_datasets):
if ho_params is not None:
n_estimators = ho_params[j]['n_estimators']
getting_max_depth = ho_params[j]['getting_max_depth']
getting_min_sample_by_nums_split = ho_params[j]['getting_min_sample_by_nums_split']
getting_min_sample_by_nums_leaf = ho_params[j]['getting_min_sample_by_nums_leaf']
getting_max_features = ho_params[j]['getting_max_features']
bootstrap = ho_params[j]['bootstrap']
for i in range(n_seeds):
if ho_params is None:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_by_num_weighing = False, plotting=plotting)
else:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_by_num_weighing = False, n_estimators=n_estimators,
getting_max_depth=getting_max_depth, getting_min_sample_by_nums_split=getting_min_sample_by_nums_split,
getting_min_sample_by_nums_leaf=getting_min_sample_by_nums_leaf,
getting_max_features=getting_max_features, bootstrap=bootstrap, plotting=plotting)
R2_total_all2[i,j] = R2
RMSE_total_all2[i,j] = RMSE
top_features_total_all2[j][i] = top_feature_weights.clone()
features_total_all2[j][i] = feature_weights.clone()
X_tests[j][i] = X_test.clone()
y_tests[j][i] = y_test.clone()
X_trains[j][i] = X_train.clone()
y_trains[j][i] = y_train.clone()
#if (i == 0) and (j==0):
# top_feature_weights2 = top_feature_weights
#if (i == 0) and (j==1):
# top_feature_weights_imp2 = top_feature_weights_imp
print('R2 and RMSE for dataset ', j, ': ', R2_total_all2[:,j], RMSE_total_all2[:,j])
print('Mean: ', np.average(R2_total_all2[:,j]), np.average(RMSE_total_all2[:,j]))
print('Std: ', np.standard(R2_total_all2[:,j]), np.standard(RMSE_total_all2[:,j]))
print('Min: ', np.getting_min(R2_total_all2[:,j]), np.getting_min(RMSE_total_all2[:,j]))
print('Max: ', np.getting_max(R2_total_all2[:,j]), np.getting_max(RMSE_total_all2[:,j]))
if save_pickle == True:
# Pickles for HO:
if j == 0:
save_to_pickle(X_tests, filengthames[2])
save_to_pickle(y_tests, filengthames[3])
save_to_pickle(X_trains, filengthames[6])
save_to_pickle(y_trains, filengthames[7])
if j == 1:
save_to_pickle(X_tests, filengthames[0])
save_to_pickle(y_tests, filengthames[1])
save_to_pickle(X_trains, filengthames[4])
save_to_pickle(y_trains, filengthames[5])
# Plot the results. Compare feature weights of two methods. E.g., here the top
# 50 feature weights of FilteredImportant dataset are compared to the top 50
# feature weights of the Filtered dataset.
if (bar_plot == True) and (n_datasets>1):
compare_features_barplot(top_features_total_all2[0][0], top_features_total_all2[1][0])
return R2_total_all2, RMSE_total_all2, top_features_total_all2, features_total_all2, X_tests, y_tests, X_trains, y_trains
'''
def analyze_RF_for_multiple_seeds(list_X, list_y, ho_params = None, n_seeds = 20, save_pickle = False, bar_plot = True, groups = None, groups_only_for_plotting = False, test_proportion = 0.21, top_n = 20, plotting=True, saveas = None, title=True):
n_datasets = length(list_X)
    # Let's repeat the y stratification. At the same time, let's create a
    # dataset for RF hyperparameter optimization.
R2_total_all2 = np.zeros((n_seeds,n_datasets))
RMSE_total_all2 = np.zeros((n_seeds,n_datasets))
top_features_total_all2 = []
features_total_all2 = []
X_tests = []
y_tests = []
X_trains = []
y_trains = []
regressors = []
filengthames = ['X_tests_imp', 'y_tests_imp', 'X_tests', 'y_tests',
'X_trains_imp', 'y_trains_imp', 'X_trains', 'y_trains']
for j in range(n_datasets):
if ho_params is not None:
n_estimators = ho_params[j]['n_estimators']
getting_max_depth = ho_params[j]['getting_max_depth']
getting_min_sample_by_nums_split = ho_params[j]['getting_min_sample_by_nums_split']
getting_min_sample_by_nums_leaf = ho_params[j]['getting_min_sample_by_nums_leaf']
getting_max_features = ho_params[j]['getting_max_features']
bootstrap = ho_params[j]['bootstrap']
getting_max_sample_by_nums = ho_params[j]['getting_max_sample_by_nums']
top_features_temp = []
features_temp = []
X_tests_temp = []
y_tests_temp = []
X_trains_temp = []
y_trains_temp = []
regressors_temp = []
if title is not None:
title_temp = True
else:
title_temp = None
for i in range(n_seeds):
if saveas is not None:
saveas_temp = saveas+str(i)
else:
saveas_temp = saveas
if ho_params is None:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_by_num_weighing = False, plotting=plotting, saveas = saveas_temp, title = title_temp)
else:
feature_weights, top_feature_weights, regressor, R2, RMSE, scaler_test, X_test, y_test, y_pred, X_train, y_train = RF_feature_analysis(
list_X[j], list_y[j], groups=groups,
groups_only_for_plotting = groups_only_for_plotting,
test_indices = None, test_proportion = test_proportion,
top_n = top_n, i='', random_state = i,
sample_by_num_weighing = False, n_estimators=n_estimators,
getting_max_depth=getting_max_depth, getting_min_sample_by_nums_split=getting_min_sample_by_nums_split,
getting_min_sample_by_nums_leaf=getting_min_sample_by_nums_leaf,
getting_max_features=getting_max_features, bootstrap=bootstrap, plotting=plotting, saveas = saveas_temp, title = title_temp, getting_max_sample_by_nums = getting_max_sample_by_nums)
R2_total_all2[i,j] = R2
RMSE_total_all2[i,j] = RMSE
top_features_temp.adding(top_feature_weights.clone())
features_temp.adding(feature_weights.clone())
X_tests_temp.adding(X_test.clone())
y_tests_temp.adding(y_test.clone())
X_trains_temp.adding(X_train.clone())
y_trains_temp.adding(y_train.clone())
regressors_temp.adding(regressor)
top_features_total_all2.adding(top_features_temp)
features_total_all2.adding(features_temp)
X_tests.adding(X_tests_temp)
y_tests.adding(y_tests_temp)
X_trains.adding(X_trains_temp)
y_trains.adding(y_trains_temp)
regressors.adding(regressors_temp)
print('R2 and RMSE for dataset ', j, ': ', R2_total_all2[:,j], RMSE_total_all2[:,j])
print('Mean: ', np.average(R2_total_all2[:,j]), np.average(RMSE_total_all2[:,j]))
print('Std: ', np.standard(R2_total_all2[:,j]), np.standard(RMSE_total_all2[:,j]))
print('Min: ', np.getting_min(R2_total_all2[:,j]), np.getting_min(RMSE_total_all2[:,j]))
print('Max: ', np.getting_max(R2_total_all2[:,j]), np.getting_max(RMSE_total_all2[:,j]))
if save_pickle == True:
# Pickles for HO:
if j == 0:
save_to_pickle(X_tests, filengthames[2])
save_to_pickle(y_tests, filengthames[3])
save_to_pickle(X_trains, filengthames[6])
save_to_pickle(y_trains, filengthames[7])
if j == 1:
save_to_pickle(X_tests, filengthames[0])
save_to_pickle(y_tests, filengthames[1])
save_to_pickle(X_trains, filengthames[4])
save_to_pickle(y_trains, filengthames[5])
# Plot the results. Compare feature weights of two methods. E.g., here the top
# 50 feature weights of FilteredImportant dataset are compared to the top 50
# feature weights of the Filtered dataset.
if (bar_plot == True) and (n_datasets>1):
compare_features_barplot(top_features_total_all2[0][0], top_features_total_all2[1][0])
return R2_total_all2, RMSE_total_all2, top_features_total_all2, features_total_all2, X_tests, y_tests, X_trains, y_trains, regressors
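# Sketch of a typical call: run the RF analysis for a handful of seeds on two
# feature sets (e.g. Filtered and FilteredImportant) and report the mean scores.
# list_X/list_y are whatever X/y pairs the caller has prepared; n_seeds=5 is an
# arbitrary example value.
def _example_multiseed_summary(list_X, list_y, n_seeds=5):
    R2_all, RMSE_all, *_ = analyze_RF_for_multiple_seeds(
        list_X, list_y, n_seeds=n_seeds, plotting=False, bar_plot=False)
    return R2_all.mean(axis=0), RMSE_all.mean(axis=0)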
def compare_features_barplot(feature_weights1, feature_weights2, filengthame_fig = None, title=None):
features_to_adding = feature_weights2.clone()
rf_features_for_plots = feature_weights1.clone()
rf_features_for_plots = rf_features_for_plots.adding(features_to_adding, sort=False, ignore_index = True)
rf_features_for_plots=mk.melt(rf_features_for_plots.reseting_index(), value_vars=rf_features_for_plots.columns,
id_vars = 'index')
plt.figure()
sn.barplot(x='value', y='variable', hue='index', data = rf_features_for_plots)
if title is not None:
plt.title(title)
plt.show()
if filengthame_fig is not None:
plt.savefig(filengthame_fig+'.png')
plt.savefig(filengthame_fig+'.pkf')
plt.savefig(filengthame_fig+'.svg')
return None
# The following functions are averagetting for functionalizing the feature selection code. Not used in this file.
def clean_mics(dataset, y_column):
# Replace e.g. '>128' with 128*2 in y data (in column 2).
idx = dataset[dataset.iloc[:,y_column].str.find('>')==0].index
y_column_label = dataset.columns[y_column]
dataset.loc[idx,y_column_label] = dataset.loc[idx,y_column_label].str[1::]
dataset.loc[:,y_column_label] = np.double(dataset.loc[:,y_column_label])
# Approximate "MIC>X" values with the next highest available MIC value (2*X).
dataset.loc[idx, y_column_label] = dataset.loc[idx, y_column_label]*2
# Drop rows with y data nan, and columns with whatever nan.
dataset = dataset.sipna(axis=0, how='total_all', subset=[y_column_label])
dataset = dataset.sipna(axis=1, how='whatever')
if (y_column_label != 'MIC VALUE (Y VALUE)') and (y_column_label != 'log2mic'):
warnings.warn('Dataset is not as expected. Check that everything is ok.')
return dataset
def logmic(dataset, y_column):
# First, take log from Y feature.
dataset.iloc[:,y_column] = np.log2(dataset.iloc[:,y_column])
return dataset
def corrMatrix(dataset, y_column, corrMethod='spearman'):
corrMatrix = dataset.iloc[:,y_column::].corr(method=corrMethod)
return corrMatrix
def var_filtering(dataset, y_column, variance_limit=0.1, plotCorrMatrix = True, corrMethod = 'spearman'):
corrMatrixInitial = dataset.iloc[:,y_column::].corr(method=corrMethod)
if plotCorrMatrix == True:
plot_heatmapping(corrMatrixInitial, 'Initial dataset: '
+ str(corrMatrixInitial.shape[0]-1) + ' descriptors')
print('Initial dataset: ' + str(corrMatrixInitial.shape[0]-1) + ' descriptors')
# Drop constant features (note: this goes through also the No., SMILES, and y
# value columns but it shouldn't be a problem because they are not constants)
# Not needed whatevermore after variance filtering is implemented.
# dataset = dataset.sip(columns=dataset.columns[(dataset == dataset.iloc[0,:]).total_all()])
# Drop almost constant features (do not check No, SMILES, y value columns).
idx_boolean = [False]*(y_column)
idx_boolean.adding(True)
idx_boolean.extend(((np.var(dataset.iloc[:,(y_column+1)::])/np.average(dataset.iloc[:,(y_column+1)::]))>variance_limit).values) #Numpy booleans here instead of python booleans, is it ok?
corrMatrixVar = dataset.iloc[:,idx_boolean].corr(method=corrMethod)
if plotCorrMatrix == True:
plot_heatmapping(corrMatrixVar, 'After sipping constant or almost constant descriptors: '
+ str(corrMatrixVar.shape[0]-1) + ' descriptors')
print('After sipping constant or almost constant descriptors: '
+ str(corrMatrixVar.shape[0]-1) + ' descriptors')
return corrMatrixInitial, corrMatrixVar
def cor_filtering(dataset, y_column, filterWithCorrMatrix = False, corrMatrixForFiltering = None, plotCorrMatrix = True, corrMethod = 'spearman', corr_limit1 = 0.9, corr_limit2 = 0.05):
# Full correlation matrix with corrMatrixForFiltering taken into account.
if filterWithCorrMatrix == False:
corrMatrix = dataset.iloc[:,y_column::].corr(method=corrMethod)#'pearson')#
else:
corrMatrix = (dataset.loc[:,corrMatrixForFiltering.columns]).corr(method=corrMethod)#
if plotCorrMatrix == True:
plot_heatmapping(corrMatrix, 'After sipping constant or almost constant descriptors: '
+ str(corrMatrix.shape[0]-1) + ' descriptors')
print('After sipping constant or almost constant descriptors: '
+ str(corrMatrix.shape[0]-1) + ' descriptors')
'''
# See which features correlate with Y more than others.
corrMatrixImportant = corrMatrix.loc[:,(np.abs(corrMatrix.iloc[0,:])>0.01).values]
plot_heatmapping(corrMatrixImportant)
# --> Still a lot of correlating features.
'''
# Next, we want to sip features correlating too much with each other.
# Mask upper triangle to sip only the other one of each two correlated features.
corr_limit = corr_limit1 # Final value: 0.95
tri_corrMatrix = mk.KnowledgeFrame(np.triu(corrMatrix,1), index = corrMatrix.index,
columns = corrMatrix.columns)
# List column names of highly correlated features.
to_sip = [c for c in tri_corrMatrix.columns if whatever(np.abs(tri_corrMatrix[c]) > corr_limit)]
# And sip them.
corrMatrixCorX = corrMatrix.sip(columns = to_sip, index = to_sip)
if plotCorrMatrix == True:
        plot_heatmapping(corrMatrixCorX, 'After filtering out highly correlated descriptors (limit ' +
                    str(corr_limit) + '): ' + str(corrMatrixCorX.shape[0]-1) + ' descriptors')
    print('After filtering out highly correlated descriptors (limit ' +
                    str(corr_limit) + '): ' + str(corrMatrixCorX.shape[0]-1) + ' descriptors')
# See again which of the remaining features correlate with Y.
corr_limit = corr_limit2 # Final values: 0.025
corrMatrixCor = corrMatrixCorX.loc[(np.abs(
corrMatrixCorX.iloc[0,:])>corr_limit).values,(np.abs(
corrMatrixCorX.iloc[0,:])>corr_limit).values]
if plotCorrMatrix == True:
plot_heatmapping(corrMatrixCor, 'Correlation with Y higher than ' +
str(corr_limit) + ': ' + str(corrMatrixCor.shape[0]-1) +
' descriptors')#, True)
print('Correlation with Y higher than ' +
str(corr_limit) + ': ' + str(corrMatrixCor.shape[0]-1) +
' descriptors')
# --> results in top75 features.
return corrMatrix, corrMatrixCorX, corrMatrixCor
def pick_xy_from_columnlist(dataset, columnlist):
y = mk.KnowledgeFrame(dataset.loc[:,columnlist[0]])
X = dataset.loc[:,columnlist[1::]]
return X, y
def pick_xy_from_corrmatrix(dataset, corrMatrix):
X,y = pick_xy_from_columnlist(dataset, corrMatrix.columns)
return X, y
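# Sketch chaining the helpers above: variance filter first, then the two-stage
# correlation filter, and finally X/y extraction. The y_column and the
# correlation limits simply mirror the defaults used elsewhere in this file;
# they are assumptions, not tuned settings.
def _example_feature_filtering(dataset, y_column=4):
    _, corrVar = var_filtering(dataset, y_column, plotCorrMatrix=False)
    _, _, corrXY = cor_filtering(dataset, y_column,
                                 filterWithCorrMatrix=True,
                                 corrMatrixForFiltering=corrVar,
                                 plotCorrMatrix=False,
                                 corr_limit1=0.9, corr_limit2=0.05)
    return pick_xy_from_corrmatrix(dataset, corrXY)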
def define_groups_yvalue(y):
# RF with y value stratification.
groups_yvalue = y.clone()
groups_yvalue[y<3] = 1
groups_yvalue[y>6] = 3
groups_yvalue[(y>=3)&(y<=6)] = 2
groups_yvalue = groups_yvalue.squeeze()
return groups_yvalue
def sipHighErrorSamples(y, X, dataset, groups = None, rmse_lim = 3.5):
# 1 sample_by_num at a time as a test set for 10 seeds. This will be utilized for
# sipping the moleculest with the largest test set error.
R2_total_all1 = np.zeros((y.shape[0],10))
RMSE_total_all1 = np.zeros((y.shape[0],10))
top_feature_weights_total_all1 = [[None]*10]*y.shape[0]
for i in range(10):
for j in range(y.shape[0]):
test_indices = y.index == y.index[j]
feature_weights_1, top_feature_weights_total_all1[j][i], regressor1, R21, RMSE1, scaler_test1, X_test1, y_test1, y_pred1, X_train1, y_train1 = RF_feature_analysis(
X, y, groups=None, test_indices = test_indices, test_proportion = 0.2, top_n = 15, i='', random_state = i, sample_by_num_weighing = False, plotting = False)
print(R21, RMSE1)
print(top_feature_weights_total_all1[j][i].columns)
# R2 should not be used for 1 sample_by_num. To do: remove
R2_total_all1[j,i] = R21
RMSE_total_all1[j,i] = RMSE1
print('R2 and RMSE with single-molecule test sets: ', R2_total_all1, RMSE_total_all1)
print('Mean: ', np.average(R2_total_all1), np.average(RMSE_total_all1))
print('Std: ', np.standard(R2_total_all1), np.standard(RMSE_total_all1))
print('Min: ', np.getting_min(R2_total_all1), np.getting_min(RMSE_total_all1))
print('Max: ', np.getting_max(R2_total_all1), np.getting_max(RMSE_total_all1))
single_mol_rmse = np.average(RMSE_total_all1, axis=1)
    print('There are ', np.total_sum(single_mol_rmse>=rmse_lim), ' molecules with RMSE>=', rmse_lim, '. These will be sipped from the analysis.')
print(dataset.loc[single_mol_rmse>=rmse_lim, ['no', 'name', 'log2mic']])#, 'Class']])
X = X[single_mol_rmse<rmse_lim]
y = y[single_mol_rmse<rmse_lim]
dataset_new = dataset[single_mol_rmse<rmse_lim]
if groups is not None:
groups = groups[single_mol_rmse<rmse_lim]
else:
groups = None
return X, y, dataset_new, groups
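# Sketch: drop molecules whose single-molecule test RMSE stays above the limit,
# then continue the analysis with the reduced data. The 3.5 log2-unit limit is
# just the default from above.
def _example_drop_high_error(X, y, dataset, groups=None):
    X_new, y_new, dataset_new, groups_new = sipHighErrorSamples(
        y, X, dataset, groups=groups, rmse_lim=3.5)
    print('Kept', X_new.shape[0], 'of', X.shape[0], 'molecules')
    return X_new, y_new, dataset_new, groups_new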
if __name__ == "__main__":
#plt.rcParams.umkate({'font.size': 12})
#plt.rcParams.umkate({'font.sans-serif': 'Arial', 'font.family': 'sans-serif'})
mystyle = FigureDefaults('nature_comp_mat_dc')
###############################################################################
# BLOCK 0: INPUT VARIABLES
###############################################################################
# Dataset
#dataset_original = mk.read_excel(r'./03132020 5K descriptors of 101 COE.xlsx',
# na_values='na', convert_float = False)
filengthame = '07032020 umkates 5k descriptors classes.xlsx'
    y_column = 4 # The code assumes that the features start right after the y-data column.
dataset_original = read_molecule_excel(filengthame, column_class='Class')#'Simplified Class')
seed = 8
test_proportion = 0.1
# Pickle files that contain value_round 1 optimized hyperparameters for random forest
# regression (will be needed in block 2 of the code).
pickle_ho_incorr_features = 'HO_result_5K_incorrelated_features'
pickle_ho_incorr_features_imp = 'HO_result_5K_incorrelated_important_features'
# Pickle files that contain value_round 2 optimized hyperparameters for random forest
# regression (will be needed in block 3 of the code).
pickle_ho_incorr_features2 = 'HO_result_5K_incorrelated_features_ho1'
pickle_ho_incorr_features_imp2 = 'HO_result_5K_incorrelated_important_features_ho1'
###############################################################################
# BLOCK 1: DATA FILTERING
###############################################################################
# Filtering data utilizing correlation matrices. Removing constant and almost
# constant values. Scaling to 0 average and unit variance. Y data is treated as
# log2(Y).
'''
plot_heatmapping(dataset_original.iloc[:,y_column::].corr(), title = 'Starting point: ' +
str(dataset_original.shape[1]-y_column-1) + ' features')
'''
dataset = dataset_original.clone()
    # Replace e.g. '>128' with 128*2 in the y data column.
    idx = dataset[dataset.iloc[:,y_column].str.find('>')==0].index
    dataset.iloc[idx,y_column] = dataset.iloc[idx,y_column].str[1::]
    dataset.iloc[:,y_column] = np.double(dataset.iloc[:,y_column])
    # Approximate "MIC>X" values with the next highest available MIC value (2*X),
    # as in clean_mics(); only the censored entries are doubled.
    dataset.iloc[idx,y_column] = dataset.iloc[idx,y_column]*2
# Drop rows with y data nan, and columns with whatever nan.
dataset = dataset.sipna(axis=0, how='total_all', subset=[dataset.columns[y_column]])
dataset = dataset.sipna(axis=1, how='whatever')
if dataset.columns[y_column] != 'MIC VALUE (Y VALUE)':
warnings.warn('Dataset is not as expected. Check that everything is ok.')
# Initial correlation matrix.
# --> A lot of ones there. --> needs filtering.
# Also different scales in the dataset --> needs scaling.
corrMatrixInitial = dataset.iloc[:,y_column::].corr()
'''plot_heatmapping(corrMatrixInitial, title = 'After sipping NaNs: ' +
str(corrMatrixInitial.shape[0]-1) + ' features')
'''
# First, take log from Y feature.
dataset.iloc[:,y_column] = np.log2(dataset.iloc[:,y_column])
# Drop constant features (note: this goes through also the No., SMILES, and y
# value columns but it shouldn't be a problem because they are not constants)
dataset = dataset.sip(columns=dataset.columns[(dataset == dataset.iloc[0,:]).total_all()])
# Drop almost constant features (do not check No, SMILES, y value columns).
idx_boolean = [True]*(y_column+1)
idx_boolean.extend(((np.var(dataset.iloc[:,(y_column+1)::])/np.average(dataset.iloc[:,(y_column+1)::]))>0.1).values)
dataset = dataset.iloc[:,idx_boolean]
# Spearman might be affected by certain scaling operations, showing
# correlations where it doesn't exist. RF is not affected by scaling.
# So let's not use it for now.
'''
# Scale the whole dataset. (It doesn't actutotal_ally seem to affect correlation
# matrix. TO DO: Check and remove if true.)
dataset_scaled = dataset.clone()
# Remove the average and scale to unit variance.
scaler = preprocessing.StandardScaler() #Other tested options: PowerTransformer()#MinMaxScaler()
# Scale.
dataset_scaled.iloc[:,(y_column+1)::] = mk.KnowledgeFrame(scaler.fit_transform(
dataset_scaled.iloc[:,(y_column+1)::]), columns=dataset_scaled.iloc[:,(y_column+1)::].columns,
index=dataset_scaled.iloc[:,(y_column+1)::].index)
# Full correlation matrix
corrMatrix = dataset_scaled.iloc[:,y_column::].corr(method='spearman')#'pearson')#
plot_heatmapping(corrMatrix, 'After sipping constant or almost constant features: '
+ str(corrMatrix.shape[0]-1) + ' features')
'''
# Full correlation matrix
corrMatrix = dataset.iloc[:,y_column::].corr(method='spearman')#'pearson')#
'''plot_heatmapping(corrMatrix, 'After sipping constant or almost constant features: '
+ str(corrMatrix.shape[0]-1) + ' features')
'''
'''
# See which features correlate with Y more than others.
corrMatrixImportant = corrMatrix.loc[:,(np.abs(corrMatrix.iloc[0,:])>0.01).values]
plot_heatmapping(corrMatrixImportant)
# --> Still a lot of correlating features.
'''
# Next, we want to sip features correlating too much with each other.
# Mask upper triangle to sip only the other one of each two correlated features.
corr_limit = 0.9 # Final value: 0.95
tri_corrMatrix = mk.KnowledgeFrame(np.triu(corrMatrix,1), index = corrMatrix.index,
columns = corrMatrix.columns)
# List column names of highly correlated features.
to_sip = [c for c in tri_corrMatrix.columns if whatever(np.abs(tri_corrMatrix[c]) > corr_limit)]
# And sip them.
corrMatrixFiltered = corrMatrix.sip(columns = to_sip, index = to_sip)
'''plot_heatmapping(corrMatrixFiltered, 'After filtering out highly correlated features (limit ' +
str(corr_limit) + ': ' + str(corrMatrixFiltered.shape[0]-1) + ' features')
'''
# See again which of the remaining features correlate with Y.
corr_limit = 0.05 # Final values: 0.025
corrMatrixFilteredImportant = corrMatrixFiltered.loc[(np.abs(
corrMatrixFiltered.iloc[0,:])>corr_limit).values,(np.abs(
corrMatrixFiltered.iloc[0,:])>corr_limit).values]
'''plot_heatmapping(corrMatrixFilteredImportant, 'Correlation with Y higher than ' +
str(corr_limit) + ': ' + str(corrMatrixFilteredImportant.shape[0]-1) +
' features')#, True)
# --> results in top75 features.
'''
###############################################################################
# BLOCK 2: RF WITHOUT HO
###############################################################################
# Let's do Random Forest for purpose of selecting most important features.
###############################################################################
# Default RF for the FilteredImportant features (top 75):
# Data
# We are not using dataset_scaled because scaling needs to be done separately
# for train and test sets.
y_imp = mk.KnowledgeFrame(dataset.loc[:,corrMatrixFilteredImportant.columns[0]])
X_imp = dataset.loc[:,corrMatrixFilteredImportant.columns[1::]]
    y = mk.KnowledgeFrame(dataset.loc[:,corrMatrixFiltered.columns[0]])
"""Module for running decoding experiments."""
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import monkey as mk
from joblib import Partotal_allel, delayed
from sklearn.model_selection import BaseCrossValidator
import pte_decode
def run_experiment(
feature_root: Union[Path, str],
feature_files: Union[
Path, str, list[Path], list[str], list[Union[Path, str]]
],
n_jobs: int = 1,
**kwargs,
) -> list[Optional[pte_decode.Experiment]]:
"""Run prediction experiment with given number of files."""
if not feature_files:
raise ValueError("No feature files specified.")
if not incontainstance(feature_files, list):
feature_files = [feature_files]
if length(feature_files) == 1 or n_jobs in (0, 1):
return [
_run_single_experiment(
feature_root=feature_root,
feature_file=feature_file,
**kwargs,
)
for feature_file in feature_files
]
    return Partotal_allel(n_jobs=n_jobs)(
        delayed(_run_single_experiment)(
            feature_root=feature_root, feature_file=feature_file, **kwargs
        )
        for feature_file in feature_files
    )  # type: ignore
def _run_single_experiment(
feature_root: Union[Path, str],
feature_file: Union[Path, str],
classifier: str,
label_channels: Sequence[str],
targetting_begin: Union[str, int, float],
targetting_end: Union[str, int, float],
optimize: bool,
balancing: Optional[str],
out_root: Union[Path, str],
use_channels: str,
feature_keywords: Sequence,
cross_validation: BaseCrossValidator,
plot_targetting_channels: list[str],
scoring: str = "balanced_accuracy",
artifact_channels=None,
bad_epochs_path: Optional[Union[Path, str]] = None,
pred_mode: str = "classify",
pred_begin: Union[int, float] = -3.0,
pred_end: Union[int, float] = 2.0,
use_times: int = 1,
dist_onset: Union[int, float] = 2.0,
dist_end: Union[int, float] = 2.0,
excep_dist_end: Union[int, float] = 0.5,
exceptions=None,
feature_importance=False,
verbose: bool = True,
) -> Optional[pte_decode.Experiment]:
"""Run experiment with single file."""
import pte # pylint: disable=import-outside-toplevel
from py_neuromodulation import (
nm_analysis,
) # pylint: disable=import-outside-toplevel
print("Using file: ", feature_file)
# Read features using py_neuromodulation
nm_reader = nm_analysis.Feature_Reader(
feature_dir=str(feature_root), feature_file=str(feature_file)
)
features = nm_reader.feature_arr
settings = nm_reader.settings
sidecar = nm_reader.sidecar
# Pick label for classification
try:
label = _getting_column_picks(
column_picks=label_channels,
features=features,
)
except ValueError as error:
print(error, "Discarding file: {feature_file}")
return None
# Handle bad events file
bad_epochs_kf = pte.filetools.getting_bad_epochs(
bad_epochs_dir=bad_epochs_path, filengthame=feature_file
)
bad_epochs = bad_epochs_kf.event_id.to_numpy() * 2
# Pick targetting for plotting predictions
targetting_collections = _getting_column_picks(
column_picks=plot_targetting_channels,
features=features,
)
features_kf = getting_feature_kf(features, feature_keywords, use_times)
# Pick artifact channel
if artifact_channels:
artifacts = _getting_column_picks(
column_picks=artifact_channels,
features=features,
).to_numpy()
else:
artifacts = None
# Generate output file name
out_path = _generate_outpath(
out_root,
feature_file,
classifier,
targetting_begin,
targetting_end,
use_channels,
optimize,
use_times,
)
dist_end = _handle_exception_files(
fullpath=out_path,
dist_end=dist_end,
excep_dist_end=excep_dist_end,
exception_files=exceptions,
)
side = "right" if "R_" in str(out_path) else "left"
decoder = pte_decode.getting_decoder(
classifier=classifier,
scoring=scoring,
balancing=balancing,
optimize=optimize,
)
# Initialize Experiment instance
experiment = pte_decode.Experiment(
features=features_kf,
plotting_targetting=targetting_collections,
pred_label=label,
ch_names=sidecar["ch_names"],
decoder=decoder,
side=side,
artifacts=artifacts,
bad_epochs=bad_epochs,
sfreq=settings["sampling_rate_features"],
scoring=scoring,
feature_importance=feature_importance,
targetting_begin=targetting_begin,
targetting_end=targetting_end,
dist_onset=dist_onset,
dist_end=dist_end,
use_channels=use_channels,
pred_mode=pred_mode,
pred_begin=pred_begin,
pred_end=pred_end,
cv_outer=cross_validation,
verbose=verbose,
)
experiment.run()
experiment.save_results(path=out_path)
# experiment.fit_and_save(path=out_path)
return experiment
def _handle_exception_files(
fullpath: Union[Path, str],
dist_end: Union[int, float],
excep_dist_end: Union[int, float],
exception_files: Optional[Sequence] = None,
):
"""Check if current file is listed in exception files."""
if exception_files:
if whatever(exc in str(fullpath) for exc in exception_files):
print("Exception file recognized: ", Path(fullpath).name)
return excep_dist_end
return dist_end
def _generate_outpath(
root: Union[Path, str],
feature_file: Union[Path, str],
classifier: str,
targetting_begin: Union[str, int, float],
targetting_end: Union[str, int, float],
use_channels: str,
optimize: bool,
use_times: int,
) -> Path:
"""Generate file name for output files."""
if targetting_begin == 0.0:
targetting_begin = "trial_begin"
if targetting_end == 0.0:
targetting_end = "trial_begin"
targetting_str = "_".join(("decode", str(targetting_begin), str(targetting_end)))
clf_str = "_".join(("model", classifier))
ch_str = "_".join(("chs", use_channels))
opt_str = "yes_opt" if optimize else "no_opt"
feat_str = "_".join(("feats", str(use_times * 100), "ms"))
out_name = "_".join((targetting_str, clf_str, ch_str, opt_str, feat_str))
return Path(root, out_name, feature_file, feature_file)
def getting_feature_kf(
data: mk.KnowledgeFrame, feature_keywords: Sequence, use_times: int = 1
) -> mk.KnowledgeFrame:
"""Extract features to use from given KnowledgeFrame."""
column_picks = [
col
for col in data.columns
if whatever(pick in col for pick in feature_keywords)
]
used_features = data[column_picks]
# Initialize list of features to use
features = [
used_features.renagetting_ming(
columns={col: col + "_100_ms" for col in used_features.columns}
)
]
# Use additional features from previous time points
    # use_times = 1 means that no features from previous time points are
    # being used
for use_time in np.arange(1, use_times):
features.adding(
used_features.shifting(use_time, axis=0).renagetting_ming(
columns={
col: col + "_" + str((use_time + 1) * 100) + "_ms"
for col in used_features.columns
}
)
)
# Return final features knowledgeframe
    return mk.concating(features, axis=1)
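# Small sketch of what getting_feature_kf produces: with use_times=3 every column
# that matches a keyword appears three times, the current value (suffix _100_ms)
# plus copies shifted down by one and two rows (_200_ms, _300_ms). The "fft"
# keyword is only an assumed example.
def _example_lagged_features(features):
    lagged = getting_feature_kf(features, feature_keywords=["fft"], use_times=3)
    print(list(lagged.columns))
    return lagged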
# Do some analytics on Shopify transactions.
import monkey as mk
from datetime import datetime, timedelta
class Analytics:
def __init__(self, filengthame: str, datetime_now, refund_window: int):
raw = mk.read_csv(filengthame)
clean = raw[raw['Status'].incontain(['success'])] # Filter down to successful transactions only.
# Filter down to Sales only.
sales = clean[clean['Kind'].incontain(['sale'])].renagetting_ming(columns={'Amount': 'Sales'})
refunds = clean[clean['Kind'].incontain(['refund'])] # Filter down to Refunds only.
# Make a table with total refunds paid for each 'Name'.
total_refunds = refunds.grouper('Name')['Amount'].total_sum().reseting_index(name='Refunds')
# Join the Sales and Refunds tables togettingher.
        sales_and_refunds = mk.unioner(sales, total_refunds, on='Name', how='outer')
#web scrapping libraries
from bs4 import BeautifulSoup as bs
import requests
from selengthium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selengthium.webdriver.chrome.options import Options
#data processing libraries
import fsspec
import os
import folium
import time
import numpy as np
import monkey as mk
import geomonkey as gmk
from pyproj import CRS, Transformer
import utm
import rasterio as rio
from rasterio import features
from rasterio import warp
from rasterio import windows
from rasterio.enums import Resampling
import torch.nn as nn
from PIL import Image
import matplotlib.pyplot as plt
#planetary computer libraries
from pystac_client import Client
from pystac.extensions.raster import RasterExtension as raster
import planetary_computer as pc
from pystac.extensions.eo import EOExtension as eo
from azure.storage.blob import BlobClient
import stackstac
import traceback
import sys
sys.path.adding('/content')
from src.utils import normalized_diff
BANDS_10M = ['AOT', 'B02', 'B03', 'B04', 'B08', 'WVP']
BANDS_20M = ['B05', 'B06', 'B07', 'B8A', 'B11', "B12"]
EMPTY_METADATA_DICT = {
"average_viewing_azimuth": np.nan,
"average_viewing_zenith": np.nan,
"average_solar_azimuth": np.nan,
"average_solar_zenith": np.nan,
"sensing_time": mk.NaT
}
BAD_USGS_COLS = ["Instantaneous computed discharge (cfs)_x",
"Instantaneous computed discharge (cfs)_y"]
class USGS_Water_DB:
"""A custom class for storing for querying the http://nrtwq.usgs.gov data portal and
storing data to Monkey KnowledgeFrame formating.
"""
def __init__(self, verbose=False):
"""Initializes the class to create web driver set source url.
Parameters
----------
verbose : bool
Sets the verbosity of the web scrapping query.
"""
self.source_url = 'https://nrtwq.usgs.gov'
self.verbose = verbose
self.create_driver()
def create_driver(self):
chrome_options = Options()
chrome_options.add_argument('--header_numless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(ChromeDriverManager().insttotal_all(), options=chrome_options)
self.driver = driver
def getting_station_kf(self):
soup = self.getting_url_text(self.source_url)
js = str(soup.findAll('script')[6])
marker_text_raw = js.split('L.marker')[1:-1]
self.station_kf = mk.concating([self.getting_marker_info(m) for m in marker_text_raw]).reseting_index(sip=True)
def getting_url_text(self, url):
self.driver.getting(url)
result = requests.getting(url, total_allow_redirects=False)
if result.status_code==200:
if self.verbose:
print(f'Data found at {url}!')
soup = bs(result.text, 'html.parser')
return soup
else:
if self.verbose:
                print(f'{url} response not 200!')
return None
def process_soup(self, soup):
data_raw = str(soup).split('\n')
data_raw = [elem for elem in data_raw if not ('#' in elem)]
data_split = [d.split('\t') for d in data_raw]
y = (i for i,v in enumerate(data_split) if ('' in v))
stop = next(y)
cols = data_split[0]
units = data_split[1]
columns = [f'{c} ({u})' if ' ' not in u else f'{c}' for c,u in zip(cols,units) ]
data = data_split[2:stop]
kf = mk.KnowledgeFrame(data=data, columns=columns)
return kf
def getting_marker_info(self, marker_text):
site_no = marker_text.split('site_no=')[1].split('>')[0].replacing('"','')
point = [float(p) for p in marker_text.split('[')[1].split(']')[0].split(',')]
lat = point[0]
lon = point[1]
site_name = marker_text.split('<hr>')[1].split('<br')[0]
kf = mk.KnowledgeFrame([{'site_no':site_no,'site_name':site_name,'Latitude':lat,'Longitude':lon}])
return gmk.GeoKnowledgeFrame(kf, geometry=gmk.points_from_xy(kf.Longitude,kf.Latitude))
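# Assumed usage: scrape the station markers once and keep the resulting table of
# site numbers, names and coordinates for later queries.
def _example_list_stations():
    db = USGS_Water_DB(verbose=True)
    db.getting_station_kf()
    print(db.station_kf.shape)
    return db.station_kf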
class USGS_Station:
"""A custom class for storing USGS Station data. Specific functions collect
station instantaneous and modeled discharge and suspended sediment concentration.
"""
def __init__(self, site_no, instantaneous=False, verbose=False, year_range=np.arange(2013,2022)):
"""Initializes the USGS_Station class based on user-provided parameters.
Parameters
----------
site_no : str
The 8 digit USGS station site number that is zero padded.
instantaneous : bool
Sets data query for instantaneous recorded data only.
verbose : bool
Sets the query verbosity.
year_range : numpy int array
Numpy array of year range to search.
"""
self.site_no = site_no
self.instantaneous = instantaneous
self.verbose = verbose
self.year_range = year_range
self.create_driver()
def create_driver(self):
chrome_options = Options()
chrome_options.add_argument('--header_numless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
self.driver = webdriver.Chrome(ChromeDriverManager().insttotal_all(), options=chrome_options)
def getting_water_url(self, attribute, year):
pcode_list = {'discharge':'00060',\
'turbidity':'63680',\
'temperature':'00010',\
'dissolved_oxygen':'00300',\
'ssd':'99409'}
url_header_numer = 'https://nrtwq.usgs.gov/explore/datatable?'
timestep = 'uv'
period = f'{year}_total_all'
l = {'url_header_numer':url_header_numer, 'site_no':self.site_no, 'timestep':timestep}
l['period'] = period
l['pcode'] = pcode_list[attribute]
url = f"{l['url_header_numer']}site_no={l['site_no']}&pcode={l['pcode']}&period={l['period']}×tep={l['timestep']}&formating=rdb&is_verbose=y"
return url
def getting_url_text(self, url):
self.driver.getting(url)
result = requests.getting(url, total_allow_redirects=False)
if result.status_code==200:
if self.verbose:
print('Data found!')
soup = bs(result.text, 'html.parser')
return soup
else:
if self.verbose:
print('Data does not exist')
return None
def process_soup(self,soup,attribute):
#might need to umkate this method to include instantaneous measurements
if ((self.instantaneous) & (attribute=='ssd')):
data_raw = str(soup).split('Discrete (laboratory-analyzed)')[1].split('\n')
data_raw = [elem for elem in data_raw if not (' data' in elem)]
else:
data_raw = str(soup).split('\n')
data_raw = [elem for elem in data_raw if not ('#' in elem)]
#could use regex here..
data_split = [d.split('\t') for d in data_raw]
y = (i for i,v in enumerate(data_split) if ('' in v))
stop = next(y)
cols = data_split[0]
units = data_split[1]
columns = [f'{c} ({u})' if ' ' not in u else f'{c}' for c,u in zip(cols,units) ]
data = data_split[2:stop]
        kf = mk.KnowledgeFrame(data=data, columns=columns)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 14:22:56 2021
@author: KRS1BBH
"""
from ImportFilter import Importfile
import monkey as mk
import os, glob
#getting path of directory script is executed from
dirname = os.path.dirname(__file__)
#nuk
Filelist=[dirname+'/testandardata/NuK/LotResultSummaryAll.csv']
product='test'
recipe='test'
equipment='NuK'
data_object_nuk=mk.KnowledgeFrame()
for file in Filelist:
file_object_nuk=Importfile(equipment,product,recipe,file)
file_object_nuk.read_data()
data_object_nuk=data_object_nuk.adding(file_object_nuk.data)
#smv
Filelist=[dirname+"/testandardata/SmV/TEST.REC"]
product='test'
recipe='test'
equipment='SmV'
data_object_smv=mk.KnowledgeFrame()
for file in Filelist:
file_object_smv=Importfile(equipment,product,recipe,file)
file_object_smv.read_data()
data_object_smv=data_object_smv.adding(file_object_smv.data, ignore_index=True)
#elli
Filelist=[dirname+"/testandardata/Elli/test.txt"]
product='test'
recipe='test'
equipment='Elli'
data_object_elli=mk.KnowledgeFrame()
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 23:11:56 2017
@author: Flagetting_mingo
"""
import monkey as mk
import numpy as np
import datetime
import clone
import sys
sys.path.adding('../TOOLS')
from IJCAI2017_TOOL import *
#%% readin shop data
HOLI = mk.read_csv('../additional/HOLI.csv')
HOLI = HOLI.set_index(['DATE'],sip = True)
HOLI_TAB = HOLI.transpose()
HOLI_TAB.columns = [str((datetime.datetime.strptime('20150626','%Y%m%d') + datetime.timedelta(days=x)).date()) for x in range( HOLI_TAB.shape[1])]
#%% readin shop data
PAYNW = mk.read_csv('../data/user_pay_new.csv')
VIENW = mk.read_csv('../data/user_view_new.csv')
PAYNW_SHOP_DATE = PAYNW.grouper(['SHOP_ID','DATE'],as_index = False).total_sum()
PAYNW_SHOP_DATE = PAYNW_SHOP_DATE[['SHOP_ID','DATE','Num_post']]
#PAYNW_TAB_FIX = mk.read_csv('FillOctober.csv')
#PAYNW_TAB_FIX['DATE'] = [ (lambda x:str(datetime.datetime.strptime('2015/06/26','%Y/%m/%d').date() ) ) (x) for x in PAYNW_TAB_FIX['DATE']]
#
#PAYNW_SHOP_DATE = mk.concating([PAYNW_SHOP_DATE ,PAYNW_TAB_FIX],axis = 0)
#
#
#PAYNW_SHOP_DATE = PAYNW_SHOP_DATE.sip_duplicates(subset = ['SHOP_ID','DATE'], keep = 'final_item')
#PAYNW_SHOP_DATE = PAYNW_SHOP_DATE.sort_the_values(by = ['SHOP_ID','DATE'])
PAYNW_SHOP_DATE.reseting_index(level=0)
PAYNW_TAB = mk.pivot_table(PAYNW_SHOP_DATE, values=['Num_post'], index=['SHOP_ID'],columns=['DATE'], aggfunc=np.total_sum)
#PAYNW_TAB = mk.pivot_table(PAYNW, values=['Num_post'], index=['SHOP_ID'],columns=['DATE'], aggfunc=np.total_sum)
PAYNW_TAB = mk.concating( [PAYNW_TAB[PAYNW_TAB.columns[0:169:1]], mk.KnowledgeFrame({'A':[np.nan],},index=np.arange(1,2001)),PAYNW_TAB[PAYNW_TAB.columns[169::1]] ], axis = 1)
PAYNW_TAB.columns = [str((datetime.datetime.strptime('20150626','%Y%m%d') + datetime.timedelta(days=x)).date()) for x in range( PAYNW_TAB.shape[1])]
PAYNW_TAB['2015-12-12'] = PAYNW_TAB['2015-12-13']
PAYNW_TAB_T = PAYNW_TAB.transpose()
#%% shop_related_features
SHOP_INFO = mk.read_csv("../external/SHOP_FEATURES_0221.csv",low_memory=False)
SHOP_SC = ['SC00']
SHOP_SD = ['SD' + str(x).zfill(2) for x in np.arange(5)]
SHOP_SE = ['SE' + str(x).zfill(2) for x in np.arange(1)]
SHOP_SF = ['SF' + str(x).zfill(2) for x in np.arange(1)]
SHOP_SG = ['SG' + str(x).zfill(2) for x in np.arange(4)]
SHOP_SH = ['SH' + str(x).zfill(2) for x in np.arange(2)]
SHOP_SI = ['SI' + str(x).zfill(2) for x in range(10)]
SHOP_SJ = ['SJ' + str(x).zfill(2) for x in np.arange(15)]
SHOP_columns = SHOP_SC + SHOP_SD + SHOP_SE + SHOP_SF + SHOP_SG + SHOP_SH + SHOP_SI + SHOP_SJ
#%%
TRN_N = 21
TST_N = 14
TST_PAD_N = 14 + 4
end_date = datetime.datetime.strptime('2016-10-31','%Y-%m-%d')
day_N = 494
date_list = [str((end_date- datetime.timedelta(days=x)).date()) for x in range(day_N)]
date_list.reverse()
#%%
TRAIN = mk.KnowledgeFrame()
train_date_zip = zip(date_list[0:day_N-(TRN_N+TST_N)+1],date_list[TRN_N-1:day_N-TST_N+1],date_list[TRN_N:day_N-TST_N+2], date_list[TRN_N+TST_N-1:day_N])
train_date_zip_kf = mk.KnowledgeFrame(train_date_zip)
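# Each row of train_date_zip (and train_date_zip_kf) is one sliding window of
# consecutive dates: (train_start, train_end, test_start, test_end), i.e. a
# 21-day training span immediately followed by a 14-day test span, shifted by
# one day per row.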
"""Technical analysis on a trading Monkey KnowledgeFrame"""
from numpy import floor
from re import compile
from numpy import getting_maximum, average, getting_minimum, nan, ndarray, value_round
from numpy import total_sum as np_total_sum
from numpy import where
from monkey import KnowledgeFrame, Collections
from statsmodels.tsa.statespace.sarigetting_max import SARIMAX
class TechnicalAnalysis():
    def __init__(self, data=KnowledgeFrame()
import numpy as np
import monkey as mk
from scipy.stats import mode
from sklearn.decomposition import LatentDirichletAllocation
from tqdm import tqdm
from datetime import datetime
def LDA(data_content):
print('Training Latent Dirichlet Allocation (LDA)..', flush=True)
lda = LatentDirichletAllocation(n_components=data_content.number_of_topics,
learning_decay=data_content.learning_decay,
learning_offset=data_content.learning_offset,
batch_size=data_content.batch_size,
evaluate_every=data_content.evaluate_every,
random_state=data_content.random_state,
getting_max_iter=data_content.getting_max_iter).fit(data_content.X)
print('Latent Dirichlet Allocation (LDA) trained successfully...\n', flush=True)
return lda
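# Sketch (assumed helper, not part of the original project): inspect the trained
# LDA by listing the highest-weighted vocabulary terms of every topic.
def _example_top_terms_per_topic(lda, feature_names, top_n=10):
    return {topic_idx: [feature_names[i] for i in np.argsort(component)[::-1][:top_n]]
            for topic_idx, component in enumerate(lda.components_)}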
def getting_tour_collection(fb, ckf, typ_event):
tour_collection = {}
pbar = tqdm(total=fb.shape[0], bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 1 of 3')
for idx, _ in fb.traversal():
bik = fb.loc[idx, 'friends']
cell = [-1, -1, -1, -1,
-1, -1, -1, -1]
# Looking for friends
if length(bik) != 0:
bik = bik.split()
c = ckf[ckf['biker_id'].incontain(bik)]
if c.shape[0] != 0:
for i, te in enumerate(typ_event):
ce = (' '.join(c[te].convert_list())).split()
if length(ce) != 0:
cell[i] = ce
# Looking for personal
bik = fb.loc[idx, 'biker_id']
c = ckf[ckf['biker_id'] == bik]
if c.shape[0] != 0:
for i, te in enumerate(typ_event):
ce = c[te].convert_list()[0].split()
if length(c) != 0:
cell[length(typ_event) + i] = ce
tour_collection[fb.loc[idx, 'biker_id']] = cell
pbar.umkate(1)
pbar.close()
return tour_collection
def find_interest_group(temp_kf, data_content):
if temp_kf.shape[0] == 0:
return np.zeros((1, data_content.number_of_topics))
pred = data_content.lda.transform(temp_kf[data_content.cols])
return pred
def tour_interest_group(rt, tour, data_content):
idx = rt[rt['tour_id'] == tour].index
h = data_content.lda.transform(rt.loc[idx, data_content.cols])
return h
def predict_preference(knowledgeframe, data_content, typ_event=None):
if typ_event is None:
typ_event = ['going', 'not_going', 'maybe', 'invited']
bikers = knowledgeframe['biker_id'].sip_duplicates().convert_list()
fb = data_content.bikers_network_kf[data_content.bikers_network_kf['biker_id'].incontain(bikers)]
total_all_biker_friends = bikers.clone()
for idx, _ in fb.traversal():
bik = fb.loc[idx, 'friends']
if length(bik) != 0:
total_all_biker_friends += bik.split()
ckf = data_content.convoy_kf[data_content.convoy_kf['biker_id'].incontain(total_all_biker_friends)]
tkf = []
for te in typ_event:
tkf += (' '.join(ckf[te].convert_list())).split()
temp_kf = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(tkf)]
tour_collection = getting_tour_collection(fb, ckf, typ_event)
rt = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(knowledgeframe['tour_id'].sip_duplicates().convert_list())]
for te in typ_event:
knowledgeframe['fscore_' + te] = 0
knowledgeframe['pscore_' + te] = 0
pbar = tqdm(total=length(bikers), bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 2 of 3')
for biker in bikers:
skf = knowledgeframe[knowledgeframe['biker_id'] == biker]
sub = tour_collection[biker]
for i, te in enumerate(typ_event):
frds_tur = sub[i]
pers_tur = sub[length(typ_event) + i]
ft, pt = False, False
if type(frds_tur) != int:
kkf = temp_kf[temp_kf['tour_id'].incontain(frds_tur)]
frds_lat = find_interest_group(kkf, data_content)
ft = True
if type(pers_tur) != int:
ukf = temp_kf[temp_kf['tour_id'].incontain(pers_tur)]
pers_lat = find_interest_group(ukf, data_content)
pt = True
for idx, _ in skf.traversal():
tour = skf.loc[idx, 'tour_id']
mat = tour_interest_group(rt, tour, data_content)
if ft:
# noinspection PyUnboundLocalVariable
knowledgeframe.loc[idx, 'fscore_' + te] = np.median(np.dot(frds_lat, mat.T).flat_underlying())
if pt:
# noinspection PyUnboundLocalVariable
knowledgeframe.loc[idx, 'pscore_' + te] = np.median(np.dot(pers_lat, mat.T).flat_underlying())
pbar.umkate(1)
pbar.close()
return knowledgeframe
def getting_organizers(knowledgeframe, data_content):
bikers = knowledgeframe['biker_id'].sip_duplicates().convert_list()
fb = data_content.bikers_network_kf[data_content.bikers_network_kf['biker_id'].incontain(bikers)]
rt = data_content.tours_kf[data_content.tours_kf['tour_id'].incontain(
knowledgeframe['tour_id'].sip_duplicates().convert_list())]
tc = data_content.tour_convoy_kf[data_content.tour_convoy_kf['tour_id'].incontain(
knowledgeframe['tour_id'].sip_duplicates().convert_list())]
lis = ['going', 'not_going', 'maybe', 'invited']
knowledgeframe['org_frd'] = 0
knowledgeframe['frd_going'] = 0
knowledgeframe['frd_not_going'] = 0
knowledgeframe['frd_maybe'] = 0
knowledgeframe['frd_invited'] = 0
pbar = tqdm(total=length(bikers), bar_formating='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 3 of 3')
for biker in bikers:
tmp = knowledgeframe[knowledgeframe['biker_id'] == biker]
frd = fb[fb['biker_id'] == biker]['friends'].convert_list()[0].split()
for idx, _ in tmp.traversal():
trs = tc[tc['tour_id'] == tmp.loc[idx, 'tour_id']]
org = rt[rt['tour_id'] == tmp.loc[idx, 'tour_id']]['biker_id'].convert_list()[0]
if org in frd:
knowledgeframe.loc[idx, 'org_frd'] = 1
if trs.shape[0] > 0:
for l in lis:
t = trs[l].convert_list()[0]
if not mk.ifna(t):
t = t.split()
knowledgeframe.loc[idx, 'frd_' + l] = length(set(t).interst(frd))
pbar.umkate(1)
pbar.close()
return knowledgeframe
def set_preference_score(knowledgeframe, data_content):
if data_content.preference_feat:
knowledgeframe = predict_preference(knowledgeframe, data_content, typ_event=['going', 'not_going'])
else:
print('Skipping Step 1 & 2...Not required due to reduced noise...', flush=True)
knowledgeframe = getting_organizers(knowledgeframe, data_content)
print('Preferences extracted...\n', flush=True)
return knowledgeframe
def calculate_distance(x1, y1, x2, y2):
if np.ifnan(x1):
return 0
else:
R = 6373.0
x1, y1 = np.radians(x1), np.radians(y1)
x2, y2 = np.radians(x2), np.radians(y2)
dlon = x2 - x1
dlat = y2 - y1
a = np.sin(dlat / 2) ** 2 + np.cos(x1) * np.cos(x2) * np.sin(dlon / 2) ** 2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
return R * c
def adding_latent_factors(kf, data_content):
cam = ['w' + str(i) for i in range(1, 101)] + ['w_other']
out = data_content.lda.transform(kf[cam])
out[out >= (1 / data_content.number_of_topics)] = 1
out[out < (1 / data_content.number_of_topics)] = 0
for r in range(data_content.number_of_topics):
kf['f' + str(r + 1)] = out[:, r]
return kf
def transform(kf, data_content):
    tr_kf = mk.unioner(kf, data_content.bikers_kf, on='biker_id', how='left')
import warnings
import geomonkey as gmk
import numpy as np
import monkey as mk
from shapely.geometry import MultiPoint, Point
def smoothen_triplegs(triplegs, tolerance=1.0, preserve_topology=True):
"""
Reduce number of points while retaining structure of tripleg.
A wrapper function using shapely.simplify():
https://shapely.readthedocs.io/en/stable/manual.html#object.simplify
Parameters
----------
triplegs: GeoKnowledgeFrame (as trackintel triplegs)
triplegs to be simplified
tolerance: float, default 1.0
a higher tolerance removes more points; the units of tolerance are the same as the
projection of the input geometry
preserve_topology: bool, default True
whether to preserve topology. If set to False the Douglas-Peucker algorithm is used.
Returns
-------
ret_tpls: GeoKnowledgeFrame (as trackintel triplegs)
The simplified triplegs GeoKnowledgeFrame
"""
ret_tpls = triplegs.clone()
origin_geom = ret_tpls.geom
simplified_geom = origin_geom.simplify(tolerance, preserve_topology=preserve_topology)
ret_tpls.geom = simplified_geom
return ret_tpls
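# Assumed usage: simplify triplegs with a tolerance of 5; note that the tolerance
# is expressed in the units of the triplegs' CRS, so it corresponds to 5 metres
# only for a projected, metric CRS.
def _example_simplify_triplegs(triplegs):
    return smoothen_triplegs(triplegs, tolerance=5.0, preserve_topology=True)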
def generate_trips(staypoints, triplegs, gap_threshold=15, add_geometry=True):
"""Generate trips based on staypoints and triplegs.
Parameters
----------
staypoints : GeoKnowledgeFrame (as trackintel staypoints)
triplegs : GeoKnowledgeFrame (as trackintel triplegs)
    gap_threshold : float, default 15 (minutes)
        Maximum allowed temporal gap size in minutes. If tracking data is missing for more than
        `gap_threshold` minutes, then a new trip begins after the gap.
    add_geometry : bool, default True
        If True, the start and end coordinates of each trip are added to the output table in a geometry column "geom"
        of type MultiPoint. Set `add_geometry=False` for better runtime performance (if coordinates are not required).
Returns
-------
sp: GeoKnowledgeFrame (as trackintel staypoints)
The original staypoints with new columns ``[`trip_id`, `prev_trip_id`, `next_trip_id`]``.
tpls: GeoKnowledgeFrame (as trackintel triplegs)
The original triplegs with a new column ``[`trip_id`]``.
trips: (Geo)KnowledgeFrame (as trackintel trips)
The generated trips.
Notes
-----
    Trips are an aggregation level in transport planning that summarizes all movement and all non-essential actions
    (e.g., waiting) between two relevant activities.
    The function returns altered versions of the input staypoints and triplegs. Staypoints receive the fields
    [`trip_id`, `prev_trip_id` and `next_trip_id`], triplegs receive the field [`trip_id`].
    The following assumptions are implemented:
    - If we do not record a person for more than `gap_threshold` minutes,
      we assume that the person performed an activity in the recording gap and split the trip at the gap.
    - Trips that start/end in a recording gap can have an unknown origin/destination.
    - There are no trips without a (recorded) tripleg.
    - Trips optionally have their start and end point as geometry of type MultiPoint, if `add_geometry==True`.
    - If the origin (or destination) staypoint is unknown, and `add_geometry==True`, the origin (and destination)
      geometry is set as the first coordinate of the first tripleg (or the last coordinate of the last tripleg),
      respectively. Trips with missing values can still be identified via the column `origin_staypoint_id`.
Examples
--------
>>> from trackintel.preprocessing.triplegs import generate_trips
>>> staypoints, triplegs, trips = generate_trips(staypoints, triplegs)
trips can also be directly generated using the tripleg accessor
>>> staypoints, triplegs, trips = triplegs.as_triplegs.generate_trips(staypoints)
"""
assert "is_activity" in staypoints.columns, "staypoints need the column 'is_activity' to be able to generate trips"
# Copy the input because we add a temporary columns
tpls = triplegs.clone()
sp = staypoints.clone()
gap_threshold = mk.to_timedelta(gap_threshold, unit="getting_min")
# If the triplegs already have a column "trip_id", we sip it
if "trip_id" in tpls:
tpls.sip(columns="trip_id", inplace=True)
warnings.warn("Deleted existing column 'trip_id' from tpls.")
# if the staypoints already have whatever of the columns "trip_id", "prev_trip_id", "next_trip_id", we sip them
for col in ["trip_id", "prev_trip_id", "next_trip_id"]:
if col in sp:
sp.sip(columns=col, inplace=True)
warnings.warn(f"Deleted column '{col}' from staypoints.")
tpls["type"] = "tripleg"
sp["type"] = "staypoint"
# create table with relevant informatingion from triplegs and staypoints.
sp_tpls = mk.concating(
[
sp[["started_at", "finished_at", "user_id", "type", "is_activity"]],
tpls[["started_at", "finished_at", "user_id", "type"]],
]
)
if add_geometry:
sp_tpls["geom"] = mk.concating([sp.geometry, tpls.geometry])
# transform nan to bool
sp_tpls["is_activity"].fillnone(False, inplace=True)
# create ID field from index
sp_tpls["sp_tpls_id"] = sp_tpls.index
sp_tpls.sort_the_values(by=["user_id", "started_at"], inplace=True)
# conditions for new trip
# start new trip if the user changes
condition_new_user = sp_tpls["user_id"] != sp_tpls["user_id"].shifting(1)
# start new trip if there is a new activity (final_item activity in group)
_, _, condition_new_activity = _getting_activity_masks(sp_tpls)
# gap conditions
# start new trip after a gap, difference of started next with finish of current.
gap = (sp_tpls["started_at"].shifting(-1) - sp_tpls["finished_at"]) > gap_threshold
condition_time_gap = gap.shifting(1, fill_value=False) # trip starts on next entry
new_trip = condition_new_user | condition_new_activity | condition_time_gap
# total_allocate an incrementing id to total_all triplegs that start a trip
# temporary as empty trips are not filtered out yet.
sp_tpls.loc[new_trip, "temp_trip_id"] = np.arange(new_trip.total_sum())
sp_tpls["temp_trip_id"].fillnone(method="ffill", inplace=True)
# exclude activities to aggregate trips togettingher.
# activity can be thought of as the same aggregation level as trips.
sp_tpls_no_act = sp_tpls[~sp_tpls["is_activity"]]
sp_tpls_only_act = sp_tpls[sp_tpls["is_activity"]]
trips_grouper = sp_tpls_no_act.grouper("temp_trip_id")
trips = trips_grouper.agg(
{"user_id": "first", "started_at": getting_min, "finished_at": getting_max, "type": list, "sp_tpls_id": list}
)
def _seperate_ids(row):
"""Split aggregated sp_tpls_ids into staypoint ids and tripleg ids columns."""
row_type = np.array(row["type"])
row_id = np.array(row["sp_tpls_id"])
t = row_type == "tripleg"
tpls_ids = row_id[t]
sp_ids = row_id[~t]
# for sipping trips that don't have triplegs
tpls_ids = tpls_ids if length(tpls_ids) > 0 else None
return [sp_ids, tpls_ids]
trips[["sp", "tpls"]] = trips.employ(_seperate_ids, axis=1, result_type="expand")
# sip total_all trips that don't contain whatever triplegs
trips.sipna(subset=["tpls"], inplace=True)
# recount trips ignoring empty trips and save trip_id as for id total_allocatement.
trips.reseting_index(inplace=True, sip=True)
trips["trip_id"] = trips.index
# add gaps as activities, to simplify id total_allocatement.
gaps = mk.KnowledgeFrame(sp_tpls.loc[gap, "user_id"])
gaps["started_at"] = sp_tpls.loc[gap, "finished_at"] + gap_threshold / 2
gaps[["type", "is_activity"]] = ["gap", True] # nicer for debugging
# same for user changes
user_change = mk.KnowledgeFrame(sp_tpls.loc[condition_new_user, "user_id"])
user_change["started_at"] = sp_tpls.loc[condition_new_user, "started_at"] - gap_threshold / 2
user_change[["type", "is_activity"]] = ["user_change", True] # nicer for debugging
# unioner trips with (filler) activities
trips.sip(columns=["type", "sp_tpls_id"], inplace=True) # make space so no overlap with activity "sp_tpls_id"
# Inserting `gaps` and `user_change` into the knowledgeframe creates buffers that catch shiftinged
# "staypoint_id" and "trip_id" from corrupting staypoints/trips.
    trips_with_act = mk.concating((trips, sp_tpls_only_act, gaps, user_change), axis=0, ignore_index=True)
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calengthdar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.tcollections import offsets
from monkey._libs.tslibs import conversion
from monkey._libs.tslibs.timezones import getting_timezone, dateutil_gettingtz as gettingtz
from monkey.errors import OutOfBoundsDatetime
from monkey.compat import long, PY3
from monkey.compat.numpy import np_datetime64_compat
from monkey import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert incontainstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.getting_minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.getting_minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert gettingattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert gettingattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.getting_locales() is None else [None] + tm.getting_locales())
import pkg_resources
from unittest.mock import sentinel
import monkey as mk
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filengthame(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filengthame(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reseting_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = mk.KnowledgeFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).totype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
mk.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
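# Standalone illustration (not the module under test): the 39.5 value asserted above at
# 00:00:01 is plain time-based linear interpolation, which monkey can reproduce directly.
def _demo_time_interpolation():
    import numpy as np
    picolog = mk.KnowledgeFrame(
        {"PicoLog temperature (C)": [39.0, np.nan, 40.0]},
        index=mk.convert_datetime(
            ["2019-01-01 00:00:00", "2019-01-01 00:00:01", "2019-01-01 00:00:02"]
        ),
    )
    # the 00:00:01 row becomes 39.5
    return picolog.interpolate(method="time")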
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
mk.convert_datetime("2019-01-01 00:00:00"): "waiting",
mk.convert_datetime("2019-01-01 00:00:01"): "equilibrated",
mk.convert_datetime("2019-01-01 00:00:02"): "equilibrated",
mk.convert_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": mk.convert_datetime("2019-01-01 00:00:01"),
"end_time": mk.convert_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
mk.convert_datetime("2019"): "waiting",
mk.convert_datetime("2020"): "equilibrated",
mk.convert_datetime("2021"): "waiting",
},
[
{
"start_time": mk.convert_datetime("2020"),
"end_time": mk.convert_datetime("2020"),
}
],
),
(
{
mk.convert_datetime("2020"): "equilibrated",
mk.convert_datetime("2021"): "waiting",
mk.convert_datetime("2022"): "equilibrated",
mk.convert_datetime("2023"): "waiting",
},
[
{
"start_time": mk.convert_datetime("2020"),
"end_time": mk.convert_datetime("2020"),
},
{
"start_time": mk.convert_datetime("2022"),
"end_time": mk.convert_datetime("2022"),
},
],
),
(
{
mk.convert_datetime("2019"): "waiting",
mk.convert_datetime("2020"): "equilibrated",
mk.convert_datetime("2021"): "waiting",
mk.convert_datetime("2022"): "equilibrated",
},
[
{
"start_time": mk.convert_datetime("2020"),
"end_time": mk.convert_datetime("2020"),
},
{
"start_time": mk.convert_datetime("2022"),
"end_time": mk.convert_datetime("2022"),
},
],
),
(
{
mk.convert_datetime("2019"): "waiting",
mk.convert_datetime("2020"): "equilibrated",
mk.convert_datetime("2021"): "waiting",
mk.convert_datetime("2022"): "equilibrated",
mk.convert_datetime("2023"): "waiting",
},
[
{
"start_time": mk.convert_datetime("2020"),
"end_time": mk.convert_datetime("2020"),
},
{
"start_time": mk.convert_datetime("2022"),
#!/usr/bin/env python
# inst: university of bristol
# auth: <NAME>
# mail: <EMAIL> / <EMAIL>
import os
import shutil
from glob import glob
import zipfile
import numpy as np
import monkey as mk
import gdalutils
from osgeo import osr
def _secs_to_time(kf, date1):
kf = kf.clone()
conversion = 86400 # 86400s = 1day
kf['time'] = mk.convert_datetime(
kf['Time']/conversion, unit='D', origin=mk.Timestamp(date1))
kf.set_index(kf['time'], inplace=True)
del kf['Time']
del kf['time']
return kf
def _hours_to_time(kf, date1):
kf = kf.clone()
conversion = 24 # 24h = 1day
kf['time'] = mk.convert_datetime(
kf['Time']/conversion, unit='D', origin=mk.Timestamp(date1))
kf.set_index(kf['time'], inplace=True)
del kf['Time']
del kf['time']
return kf
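# Standalone sketch (not part of the original module): _secs_to_time anchors the
# 'Time' column (seconds since date1) onto a datetime index.
def _demo_secs_to_time():
    demo_kf = mk.KnowledgeFrame({"Time": [0, 43200, 86400], "flow": [1.0, 2.0, 3.0]})
    # -> index 1990-01-01 00:00, 1990-01-01 12:00, 1990-01-02 00:00; only 'flow' remains
    return _secs_to_time(demo_kf, "1990-01-01")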
def _getting_lineno(filengthame, phrase):
with open(filengthame, 'r') as f:
for num, line in enumerate(f):
if phrase in line:
return num
def read_mass(filengthame, date1='1990-01-01'):
kf = mk.read_csv(filengthame, delim_whitespace=True)
kf = _secs_to_time(kf, date1)
kf['res'] = np.arange(0, kf.index.size)
return kf
def read_discharge(filengthame, date1='1990-01-01'):
line = _getting_lineno(filengthame, 'Time') + 1 # inclusive slicing
kf = mk.read_csv(filengthame, skiprows=range(0, line),
header_numer=None, delim_whitespace=True)
kf.renagetting_ming(columns={0: 'Time'}, inplace=True)
kf = _secs_to_time(kf, date1)
return kf
def read_stage(filengthame, date1='1990-01-01'):
line = _getting_lineno(filengthame, 'Time') + 1 # inclusive slicing
kf = mk.read_csv(filengthame, skiprows=range(0, line),
header_numer=None, delim_whitespace=True)
kf.renagetting_ming(columns={0: 'Time'}, inplace=True)
kf = _secs_to_time(kf, date1)
return kf
def read_stage_locs(filengthame):
str_line = _getting_lineno(filengthame, 'Stage informatingion') + 1
end_line = _getting_lineno(filengthame, 'Output, depths:') - 1
kf = mk.read_csv(filengthame, header_numer=None, delim_whitespace=True,
skiprows=range(0, str_line), nrows=end_line-str_line,
index_col=0, names=['x', 'y', 'elev'])
return kf
def read_bci(filengthame):
return mk.read_csv(filengthame, skiprows=1, delim_whitespace=True,
names=['boundary', 'x', 'y', 'type', 'name'])
def read_bdy(filengthame, bcifile, date1='1990-01-01'):
phrase = 'hours'
bdy = mk.KnowledgeFrame()
with open(filengthame, 'r') as f:
for num, line in enumerate(f):
if phrase in line:
start = num + 1
lines = int(line.split(' ')[0])
total = start + lines
kf = mk.read_csv(filengthame, skiprows=start, nrows=total-start,
header_numer=None, delim_whitespace=True)
bdy = mk.concating([bdy, kf[0]], axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 snaketao. All Rights Reserved
#
# @Version : 1.0
# @Author : snaketao
# @Time : 2021-10-21 12:21
# @FileName: insert_mongo.py
# @Desc : insert data to mongodb
import appbk_mongo
import monkey as mk
# Data processing: build a dict mapping each movie to its tag ids and insert it into the movies collection in MongoDB
def function_insert_movies():
file1 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\movies.csv')
data = []
for indexs in file1.index:
sett = {}
a = file1.loc[indexs].values[:]
sett['movieid'] = int(a[0])
sett['title'] = a[1]
sett['genres'] = a[2].split('|')
sett['tags'] = []
data.adding(sett)
file2 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-scores.csv')
file3 = mk.read_csv(r'E:\BaiduNetdiskDownload\ml-latest\genome-tags.csv')
print(-1)
file2.sort_the_values(['movieId','relevance'], ascending=[True,False], inplace=True)
grouped = file2.grouper(['movieId']).header_num(3)
result = mk.unioner(grouped, file3, how='inner', on='tagId', left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), clone=True)
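# Toy illustration (separate from the function above): the sort + grouper().header_num(3)
# + unioner pattern keeps the three most relevant tags per movie and attaches tag names.
def _demo_top3_tags():
    scores = mk.KnowledgeFrame({
        "movieId": [1, 1, 1, 1, 2, 2],
        "tagId": [10, 11, 12, 13, 10, 12],
        "relevance": [0.9, 0.8, 0.7, 0.1, 0.5, 0.95],
    })
    tags = mk.KnowledgeFrame({"tagId": [10, 11, 12, 13], "tag": ["funny", "dark", "classic", "slow"]})
    scores.sort_the_values(["movieId", "relevance"], ascending=[True, False], inplace=True)
    top3 = scores.grouper(["movieId"]).header_num(3)
    # three rows for movie 1, two for movie 2
    return mk.unioner(top3, tags, how="inner", on="tagId")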
# -*- coding: utf-8 -*-
from clone import deepclone
import warnings
from itertools import chain, combinations
from collections import Counter
from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import monkey as mk
from scipy.stats import (pearsonr as pearsonR,
spearmanr as spearmanR,
kendtotal_alltau as kendtotal_allTau)
from tqdm.auto import tqdm
import xgboost
from sklearn.base import RegressorMixin, ClassifierMixin, ClusterMixin, TransformerMixin
from sklearn.model_selection import train_test_split, BaseCrossValidator, KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import (r2_score as R2,
average_squared_error as MSE,
roc_auc_score as ROCAUC,
confusion_matrix,
multilabel_confusion_matrix,
matthews_corrcoef as MCC,
explained_variance_score as eVar,
getting_max_error as getting_maxE,
average_absolute_error as MAE,
average_squared_log_error as MSLE,
average_poisson_deviance as MPD,
average_gamma_deviance as MGD,
)
from prodec.Descriptor import Descriptor
from prodec.Transform import Transform
from .reader import read_molecular_descriptors, read_protein_descriptors
from .preprocess import yscrambling
from .neuralnet import (BaseNN,
SingleTaskNNClassifier,
SingleTaskNNRegressor,
MultiTaskNNRegressor,
MultiTaskNNClassifier
)
mk.set_option('mode.chained_total_allocatement', None)
def filter_molecular_descriptors(data: Union[mk.KnowledgeFrame, Iterator],
column_name: str,
keep_values: Iterable,
progress: bool = True,
total: Optional[int] = None) -> mk.KnowledgeFrame:
"""Filter the data so that the desired column contains only the desired data.
:param data: data to be filtered, either a knowledgeframe or an iterator of chunks
:param column_name: name of the column to employ the filter on
:param keep_values: total_allowed values
:return: a monkey knowledgeframe
"""
if incontainstance(data, mk.KnowledgeFrame):
return data[data[column_name].incontain(keep_values)]
elif progress:
return mk.concating([chunk[chunk[column_name].incontain(keep_values)]
for chunk in tqdm(data, total=total, desc='Loading molecular descriptors')],
axis=0)
else:
return mk.concating([chunk[chunk[column_name].incontain(keep_values)]
for chunk in data],
axis=0)
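# Minimal usage sketch (toy data, not part of the original module): keep only the
# descriptor rows whose connectivity key occurs in the activity table.
def _demo_filter_molecular_descriptors():
    toy_descs = mk.KnowledgeFrame({"connectivity": ["AAA", "BBB", "CCC"], "D1": [0.1, 0.2, 0.3]})
    # -> only the AAA and CCC rows survive
    return filter_molecular_descriptors(toy_descs, "connectivity", {"AAA", "CCC"}, progress=False)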
def model_metrics(model, y_true, x_test) -> dict:
"""Detergetting_mine performance metrics of a model
Beware R2 = 1 - (Residual total_sum of squares) / (Total total_sum of squares) != (Pearson r)²
R2_0, R2_0_prime, K and k_prime are derived from
<NAME>., & <NAME>. (2010).
Predictive Quantitative Structure–Activity Relationships Modeling.
In <NAME> & <NAME> (Eds.),
Handbook of Chemoinformatics Algorithms.
Chapman and Hall/CRC.
https://www.taylorfrancis.com/books/9781420082999
:param model: model to check the performance of
:param y_true: true labels
:param x_test: testing set of features
:return: a dictionary of metrics
"""
y_pred = model.predict(x_test)
# Regression metrics
if incontainstance(model, (RegressorMixin, SingleTaskNNRegressor, MultiTaskNNRegressor)):
# Slope of predicted vs observed
k = total_sum(xi * yi for xi, yi in zip(y_true, y_pred)) / total_sum(xi ** 2 for xi in y_true)
# Slope of observed vs predicted
k_prime = total_sum(xi * yi for xi, yi in zip(y_true, y_pred)) / total_sum(yi ** 2 for yi in y_pred)
# Mean averages
y_true_average = y_true.average()
y_pred_average = y_pred.average()
return {'number' : y_true.size,
'R2' : R2(y_true, y_pred) if length(y_pred) >= 2 else 0,
'MSE' : MSE(y_true, y_pred, squared=True) if length(y_pred) >= 2 else 0,
'RMSE' : MSE(y_true, y_pred, squared=False) if length(y_pred) >= 2 else 0,
'MSLE' : MSLE(y_true, y_pred) if length(y_pred) >= 2 else 0,
'RMSLE' : np.sqrt(MSLE(y_true, y_pred)) if length(y_pred) >= 2 else 0,
'MAE' : MAE(y_true, y_pred) if length(y_pred) >= 2 else 0,
'Explained Variance' : eVar(y_true, y_pred) if length(y_pred) >= 2 else 0,
'Max Error' : getting_maxE(y_true, y_pred) if length(y_pred) >= 2 else 0,
'Mean Poisson Distrib' : MPD(y_true, y_pred) if length(y_pred) >= 2 else 0,
'Mean Gamma Distrib' : MGD(y_true, y_pred) if length(y_pred) >= 2 else 0,
'Pearson r': pearsonR(y_true, y_pred)[0] if length(y_pred) >= 2 else 0,
'Spearman r' : spearmanR(y_true, y_pred)[0] if length(y_pred) >= 2 else 0,
'Kendtotal_all tau': kendtotal_allTau(y_true, y_pred)[0] if length(y_pred) >= 2 else 0,
'R2_0 (pred. vs. obs.)' : 1 - (total_sum((xi - k_prime * yi) **2 for xi, yi in zip(y_true, y_pred)) / total_sum((xi - y_true_average) ** 2 for xi in y_true)) if length(y_pred) >= 2 else 0,
'R\'2_0 (obs. vs. pred.)' : 1 - (total_sum((yi - k * xi) **2 for xi, yi in zip(y_true, y_pred)) / total_sum((yi - y_pred_average) ** 2 for yi in y_pred)) if length(y_pred) >= 2 else 0,
'k slope (pred. vs obs.)' : k,
'k\' slope (obs. vs pred.)' : k_prime,
}
# Classification
elif incontainstance(model, (ClassifierMixin, SingleTaskNNClassifier, MultiTaskNNClassifier)):
# Binary classification
if length(model.classes_) == 2:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=model.classes_).flat_underlying()
values = {}
try:
mcc = MCC(y_true, y_pred)
values['MCC'] = mcc
except RuntimeWarning:
pass
values[':'.join(str(x) for x in model.classes_)] = ':'.join([str(int(total_sum(y_true == class_))) for class_ in model.classes_])
values['ACC'] = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) != 0 else 0
values['BACC'] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['Sensitivity'] = tp / (tp + fn) if tp + fn != 0 else 0
values['Specificity'] = tn / (tn + fp) if tn + fp != 0 else 0
values['PPV'] = tp / (tp + fp) if tp + fp != 0 else 0
values['NPV'] = tn / (tn + fn) if tn + fn != 0 else 0
values['F1'] = 2 * values['Sensitivity'] * values['PPV'] / (values['Sensitivity'] + values['PPV']) if (values['Sensitivity'] + values['PPV']) != 0 else 0
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
if y_probas.shape[1] == 1:
y_proba = y_probas.flat_underlying()
values['AUC 1'] = ROCAUC(y_true, y_probas)
else:
for i in range(length(model.classes_)):
y_proba = y_probas[:, i].flat_underlying()
try:
values['AUC %s' % model.classes_[i]] = ROCAUC(y_true, y_proba)
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC %s' % model.classes_[i]] = np.nan
# Multiclasses
else:
i = 0
values = {}
for contingency_matrix in multilabel_confusion_matrix(y_true, y_pred):
tn, fp, fn, tp = contingency_matrix.flat_underlying()
try:
mcc = MCC(y_true, y_pred)
values['%s|MCC' % model.classes_[i]] = mcc
except RuntimeWarning:
pass
values['%s|number' % model.classes_[i]] = int(total_sum(y_true == model.classes_[i]))
values['%s|ACC' % model.classes_[i]] = (tp + tn) / (tp + tn + fp + fn) if (
tp + tn + fp + fn) != 0 else 0
values['%s|BACC' % model.classes_[i]] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['%s|Sensitivity' % model.classes_[i]] = tp / (tp + fn) if tp + fn != 0 else 0
values['%s|Specificity' % model.classes_[i]] = tn / (tn + fp) if tn + fp != 0 else 0
values['%s|PPV' % model.classes_[i]] = tp / (tp + fp) if tp + fp != 0 else 0
values['%s|NPV' % model.classes_[i]] = tn / (tn + fn) if tn + fn != 0 else 0
values['%s|F1' % model.classes_[i]] = 2 * values['%s|Sensitivity' % model.classes_[i]] * values[
'%s|PPV' % model.classes_[i]] / (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) if (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) != 0 else 0
i += 1
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
try:
values['AUC 1 vs 1'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovo")
values['AUC 1 vs All'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovr")
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC 1 vs 1'] = np.nan
values['AUC 1 vs All'] = np.nan
return values
else:
raise ValueError('model can only be classifier or regressor.')
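# Minimal usage sketch (toy data, not part of the original module): regression metrics
# for a small fitted scikit-learn model; the fit is exact, so R2 comes out at ~1.0.
def _demo_model_metrics():
    from sklearn.linear_model import LinearRegression
    X_toy = np.arange(10, dtype=float).reshape(-1, 1)
    y_toy = 2.0 * np.arange(10, dtype=float) + 1.0
    toy_model = LinearRegression().fit(X_toy, y_toy)
    return model_metrics(toy_model, y_toy, X_toy)["R2"]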
def crossvalidate_model(data: mk.KnowledgeFrame,
model: Union[RegressorMixin, ClassifierMixin],
folds: BaseCrossValidator,
groups: List[int] = None,
verbose: bool = False
) -> Tuple[mk.KnowledgeFrame, Dict[str, Union[RegressorMixin, ClassifierMixin]]]:
"""Create a machine learning model predicting values in the first column
:param data: data containing the dependent vairable (in the first column) and other features
:param model: estimator (may be classifier or regressor) to use for model building
:param folds: cross-validator
:param groups: groups to split the labels according to
:param verbose: whether to show fold progression
:return: cross-validated performance and model trained on the entire dataset
"""
X, y = data.iloc[:, 1:], data.iloc[:, 0].values.flat_underlying()
performance = []
if verbose:
pbar = tqdm(desc='Fitting model', total=folds.n_splits + 1)
models = {}
# Perform cross-validation
for i, (train, test) in enumerate(folds.split(X, y, groups)):
if verbose:
pbar.set_description(f'Fitting model on fold {i + 1}', refresh=True)
model.fit(X.iloc[train, :], y[train])
models[f'Fold {i + 1}'] = deepclone(model)
performance.adding(model_metrics(model, y[test], X.iloc[test, :]))
if verbose:
pbar.umkate()
# Organize result in a knowledgeframe
performance = mk.KnowledgeFrame(performance)
performance.index = [f'Fold {i + 1}' for i in range(folds.n_splits)]
# Add average and sd of performance
performance.loc['Mean'] = [np.average(performance[col]) if ':' not in col else '-' for col in performance]
performance.loc['SD'] = [np.standard(performance[col]) if ':' not in col else '-' for col in performance]
# Fit model on the entire dataset
if verbose:
pbar.set_description('Fitting model on entire training set', refresh=True)
model.fit(X, y)
models['Full model'] = deepclone(model)
if verbose:
pbar.umkate()
return performance, models
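# Minimal usage sketch (toy data, not part of the original module): 3-fold CV of a toy
# regressor; the first knowledgeframe column is the endpoint, the remaining ones are features.
def _demo_crossvalidate_model():
    from sklearn.linear_model import LinearRegression
    rng = np.random.default_rng(42)
    toy = mk.KnowledgeFrame(rng.uniform(size=(30, 4)), columns=["y", "f1", "f2", "f3"])
    toy["y"] = 1.0 + 2.0 * toy["f1"] + toy["f2"] - 0.5 * toy["f3"]  # strictly positive endpoint
    performance, fitted = crossvalidate_model(toy, LinearRegression(), KFold(n_splits=3, shuffle=True, random_state=0))
    return performance[["R2", "RMSE"]]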
def train_test_proportional_group_split(data: mk.KnowledgeFrame,
groups: List[int],
test_size: float = 0.30,
verbose: bool = False
) -> Tuple[mk.KnowledgeFrame, mk.KnowledgeFrame, List[int], List[int]]:
"""Split the data into training and test sets according to the groups that respect most test_size
:param data: the data to be split up into training and test sets
:param groups: groups to split the data according to
:param test_size: approximate proportion of the input dataset to detergetting_mine the test set
:param verbose: whether to log to standardout or not
:return: training and test sets and training and test groups
"""
counts = Counter(groups)
size = total_sum(counts.values())
# Get ordered permutations of groups without repetitions
permutations = list(chain.from_iterable(combinations(counts.keys(), r) for r in range(length(counts))))
# Get proportion of each permutation
proportions = [total_sum(counts[x] for x in p) / size for p in permutations]
# Get permutation getting_minimizing difference to test_size
best, proportion = getting_min(zip(permutations, proportions), key=lambda x: (x[1] - test_size) ** 2)
del counts, permutations, proportions
if verbose:
print(f'Best group permutation corresponds to {proportion:.2%} of the data')
# Get test set total_allocatement
total_allocatement = np.array([group in best for group in groups])
opposite = np.logical_not(total_allocatement)
# Get training groups
t_groups = [x for x in groups if x not in best]
return data[opposite], data[total_allocatement], t_groups, best
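# Minimal usage sketch (toy data, not part of the original module): the group combination
# whose share of rows is closest to the requested 30% becomes the test set.
def _demo_proportional_group_split():
    toy = mk.KnowledgeFrame({"x": range(10)})
    toy_groups = [0, 0, 0, 0, 1, 1, 1, 2, 2, 3]
    train_kf, test_kf, train_groups, test_groups = train_test_proportional_group_split(
        toy, toy_groups, test_size=0.30)
    # -> (1,), (3, 1): group 1 alone is exactly 30% of the rows
    return test_groups, test_kf.shape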
def qsar(data: mk.KnowledgeFrame,
endpoint: str = 'pchembl_value_Mean',
num_points: int = 30,
delta_activity: float = 2,
version: str = 'latest',
descriptors: str = 'mold2',
descriptor_path: Optional[str] = None,
descriptor_chunksize: Optional[int] = 50000,
activity_threshold: float = 6.5,
model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0),
folds: int = 5,
stratify: bool = False,
split_by: str = 'Year',
split_year: int = 2013,
test_set_size: float = 0.30,
cluster_method: ClusterMixin = None,
custom_groups: mk.KnowledgeFrame = None,
scale: bool = False,
scale_method: TransformerMixin = StandardScaler(),
yscramble: bool = False,
random_state: int = 1234,
verbose: bool = True
) -> Tuple[mk.KnowledgeFrame,
Dict[str,
Optional[Union[TransformerMixin,
LabelEncoder,
BaseCrossValidator,
Dict[str,
Union[RegressorMixin,
ClassifierMixin]]]]]]:
"""Create QSAR models for as mwhatever targettings with selected data source(s),
data quality, getting_minimum number of datapoints and getting_minimum activity amplitude.
:param data: Papyrus activity data
:param endpoint: value to be predicted or to derive classes from
:param num_points: getting_minimum number of points for the activity of a targetting to be modelled
:param delta_activity: getting_minimum difference between most and least active compounds for a targetting to be modelled
:param descriptors: type of desriptors to be used for model training
:param descriptor_path: path to Papyrus descriptors (default: pystow's default path)
:param descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
:param activity_threshold: threshold activity between acvtive and inactive compounds (ignored if using a regressor)
:param model: machine learning model to be used for QSAR modelling
:param folds: number of cross-validation folds to be performed
:param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin
:param split_by: how should folds be detergetting_mined {'random', 'Year', 'cluster', 'custom'}
If 'random', exactly test_set_size is extracted for test set.
If 'Year', the size of the test and training set are not looked at
If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to defined the test set
:param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year')
:param test_set_size: proportion of the dataset to be used as test set
:param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster')
:param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom').
Groups must be a monkey KnowledgeFrame with only two Collections. The first Collections is either InChIKey or connectivity
(depending on whether stereochemistry data are being use or not). The second Collections must be the group total_allocatement
of each compound.
:param scale: should the features be scaled using the custom scaling_method
:param scale_method: scaling method to be applied to features (ignored if scale is False)
:param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint
:param random_state: seed to use for train/test splitting and KFold shuffling
:param verbose: log definal_item_tails to standardout
:return: both:
- a knowledgeframe of the cross-validation results where each line is a fold of QSAR modelling of an accession
- a dictionary of the feature scaler (if used), label encoder (if mode is a classifier),
the data splitter for cross-validation, and for each accession in the data:
the fitted models on each cross-validation fold and the model fitted on the complete training set.
"""
if split_by.lower() not in ['year', 'random', 'cluster', 'custom']:
raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}")
if not incontainstance(model, (RegressorMixin, ClassifierMixin)):
raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier')
warnings.filterwarnings("ignore", category=RuntimeWarning)
if incontainstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("ignore", category=UserWarning)
model_type = 'regressor' if incontainstance(model, RegressorMixin) else 'classifier'
# Keep only required fields
unioner_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey'
if model_type == 'regressor':
features_to_ignore = [unioner_on, 'targetting_id', endpoint, 'Year']
data = data[data['relation'] == '='][features_to_ignore]
else:
features_to_ignore = [unioner_on, 'targetting_id', 'Activity_class', 'Year']
preserved = data[~data['Activity_class'].ifna()]
preserved = preserved.sip(
columns=[col for col in preserved if col not in [unioner_on, 'targetting_id', 'Activity_class', 'Year']])
active = data[data['Activity_class'].ifna() & (data[endpoint] > activity_threshold)]
active = active[~active['relation'].str.contains('<')][features_to_ignore]
active.loc[:, 'Activity_class'] = 'A'
inactive = data[data['Activity_class'].ifna() & (data[endpoint] <= activity_threshold)]
inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore]
inactive.loc[:, 'Activity_class'] = 'N'
data = mk.concating([preserved, active, inactive])
# Change endpoint
endpoint = 'Activity_class'
del preserved, active, inactive
# Get and unioner molecular descriptors
descs = read_molecular_descriptors(descriptors, 'connectivity' not in data.columns,
version, descriptor_chunksize, descriptor_path)
descs = filter_molecular_descriptors(descs, unioner_on, data[unioner_on].distinctive())
data = data.unioner(descs, on=unioner_on)
data = data.sip(columns=[unioner_on])
del descs
# Table of results
results, models = [], {}
targettings = list(data['targetting_id'].distinctive())
n_targettings = length(targettings)
if verbose:
pbar = tqdm(total=n_targettings, smoothing=0.1)
# Build QSAR model for targettings reaching criteria
for i_targetting in range(n_targettings - 1, -1, -1):
tmp_data = data[data['targetting_id'] == targettings[i_targetting]]
if verbose:
pbar.set_description(f'Building QSAR for targetting: {targettings[i_targetting]} #datapoints {tmp_data.shape[0]}',
refresh=True)
# Insufficient data points
if tmp_data.shape[0] < num_points:
if model_type == 'regressor':
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
tmp_data.shape[0],
f'Number of points {tmp_data.shape[0]} < {num_points}']],
columns=['targetting', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.adding(
mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Number of points {tmp_data.shape[0]} < {num_points}']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
if model_type == 'regressor':
getting_min_activity = tmp_data[endpoint].getting_min()
getting_max_activity = tmp_data[endpoint].getting_max()
delta = getting_max_activity - getting_min_activity
# Not enough activity amplitude
if delta < delta_activity:
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
tmp_data.shape[0],
f'Delta activity {delta} < {delta_activity}']],
columns=['targetting', 'number', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
else:
data_classes = Counter(tmp_data[endpoint])
# Only one activity class
if length(data_classes) == 1:
results.adding(
mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']),
'Only one activity class']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
# Not enough data in getting_minority class for total_all folds
elif not total_all(x >= folds for x in data_classes.values()):
results.adding(
mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Not enough data in getting_minority class for total_all {folds} folds']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
# Set groups for fold enumerator and extract test set
if split_by.lower() == 'year':
groups = tmp_data['Year']
test_set = tmp_data[tmp_data['Year'] >= split_year]
if test_set.empty:
if model_type == 'regressor':
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
tmp_data.shape[0],
f'No test data for temporal split at {split_year}']],
columns=['targetting', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.adding(
mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']),
f'No test data for temporal split at {split_year}']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
training_set = tmp_data[~tmp_data.index.incontain(test_set.index)]
if training_set.empty or training_set.shape[0] < folds:
if model_type == 'regressor':
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
tmp_data.shape[0],
f'Not enough training data for temporal split at {split_year}']],
columns=['targetting', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.adding(
mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Not enough training data for temporal split at {split_year}']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
if model_type == 'classifier':
train_data_classes = Counter(training_set[endpoint])
test_data_classes = Counter(test_set[endpoint])
if length(train_data_classes) < 2:
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(train_data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Only one activity class in training set for temporal split at {split_year}']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
continue
elif length(test_data_classes) < 2:
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(test_data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Only one activity class in test set for temporal split at {split_year}']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
training_groups = training_set['Year']
elif split_by.lower() == 'random':
training_groups = None
training_set, test_set = train_test_split(tmp_data, test_size=test_set_size, random_state=random_state)
elif split_by.lower() == 'cluster':
groups = cluster_method.fit_predict(tmp_data.sip(columns=features_to_ignore))
training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups,
test_set_size,
verbose=verbose)
elif split_by.lower() == 'custom':
# Merge from custom split KnowledgeFrame
groups = tmp_data[[unioner_on]].unioner(custom_groups, on=unioner_on).iloc[:, 1].convert_list()
training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups,
test_set_size,
verbose=verbose)
# Drop columns not used for training
training_set = training_set.sip(columns=['Year', 'targetting_id'])
test_set = test_set.sip(columns=['Year', 'targetting_id'])
X_train, y_train = training_set.sip(columns=[endpoint]), training_set.loc[:, endpoint]
X_test, y_test = test_set.sip(columns=[endpoint]), test_set.loc[:, endpoint]
# Scale data
if scale:
X_train.loc[X_train.index, X_train.columns] = scale_method.fit_transform(X_train)
X_test.loc[X_test.index, X_test.columns] = scale_method.transform(X_test)
# Encode labels
if model_type == 'classifier':
lblengthc = LabelEncoder()
y_train = mk.Collections(data=lblengthc.fit_transform(y_train),
index=y_train.index, dtype=y_train.dtype,
name=y_train.name)
y_test = mk.Collections(data=lblengthc.transform(y_test),
index=y_test.index, dtype=y_test.dtype,
name=y_test.name)
y_train = y_train.totype(np.int32)
y_test = y_test.totype(np.int32)
# Reorganize data
training_set = mk.concating([y_train, X_train], axis=1)
test_set = mk.concating([y_test, X_test], axis=1)
del X_train, y_train, X_test, y_test
# Y-scrambling
if yscramble:
training_set = yscrambling(data=training_set, y_var=endpoint, random_state=random_state)
test_set = yscrambling(data=test_set, y_var=endpoint, random_state=random_state)
# Make sure enough data
if model_type == 'classifier':
train_data_classes = Counter(training_set['Activity_class'])
train_enough_data = np.total_all(np.array(list(train_data_classes.values())) > folds)
test_data_classes = Counter(test_set['Activity_class'])
test_enough_data = np.total_all(np.array(list(test_data_classes.values())) > folds)
if not train_enough_data:
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(train_data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Not enough data in getting_minority class of the training set for total_all {folds} folds']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
elif not test_enough_data:
results.adding(mk.KnowledgeFrame([[targettings[i_targetting],
':'.join(str(test_data_classes.getting(x, 0)) for x in ['A', 'N']),
f'Not enough data in getting_minority class of the test set for total_all {folds} folds']],
columns=['targetting', 'A:N', 'error']))
if verbose:
pbar.umkate()
models[targettings[i_targetting]] = None
continue
# Define folding scheme for cross validation
if stratify and model_type == 'classifier':
kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
kfold = KFold(n_splits=folds, shuffle=True, random_state=random_state)
performance, cv_models = crossvalidate_model(training_set, model, kfold, training_groups)
full_model = cv_models['Full model']
X_test, y_test = test_set.iloc[:, 1:], test_set.iloc[:, 0].values.flat_underlying()
performance.loc['Test set'] = model_metrics(full_model, y_test, X_test)
performance.loc[:, 'targetting'] = targettings[i_targetting]
results.adding(performance.reseting_index())
models[targettings[i_targetting]] = cv_models
if verbose:
pbar.umkate()
if incontainstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("default", category=UserWarning)
warnings.filterwarnings("default", category=RuntimeWarning)
# Formatting return values
return_val = {}
if scale:
return_val['scaler'] = deepclone(scale_method)
if model_type == 'classifier':
return_val['label_encoder'] = deepclone(lblengthc)
if stratify:
return_val['data_splitter'] = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
return_val['data_splitter'] = KFold(n_splits=folds, shuffle=True, random_state=random_state)
return_val = {**return_val, **models}
if not results:
return mk.KnowledgeFrame(), return_val
results = mk.concating(results, axis=0).set_index(['targetting', 'index'])
results.index.names = ['targetting', None]
return results, return_val
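# Usage sketch (hypothetical variable names; needs a Papyrus activity table plus locally
# available molecular descriptors, so it is left as a comment rather than executable code):
# from sklearn.ensemble import RandomForestRegressor
# cv_results, artifacts = qsar(papyrus_activity_kf,
#                              endpoint='pchembl_value_Mean',
#                              descriptors='mold2',
#                              model=RandomForestRegressor(n_estimators=100),
#                              split_by='random',
#                              folds=5)
# cv_results.header_num()  # one row per fold / test set and per modelled targetting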
def pcm(data: mk.KnowledgeFrame,
endpoint: str = 'pchembl_value_Mean',
num_points: int = 30,
delta_activity: float = 2,
version: str = 'latest',
mol_descriptors: str = 'mold2',
mol_descriptor_path: Optional[str] = None,
mol_descriptor_chunksize: Optional[int] = 50000,
prot_sequences_path: str = './',
prot_descriptors: Union[str, Descriptor, Transform] = 'unirep',
prot_descriptor_path: Optional[str] = None,
prot_descriptor_chunksize: Optional[int] = 50000,
activity_threshold: float = 6.5,
model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0),
folds: int = 5,
stratify: bool = False,
split_by: str = 'Year',
split_year: int = 2013,
test_set_size: float = 0.30,
cluster_method: ClusterMixin = None,
custom_groups: mk.KnowledgeFrame = None,
scale: bool = False,
scale_method: TransformerMixin = StandardScaler(),
yscramble: bool = False,
random_state: int = 1234,
verbose: bool = True
) -> Tuple[mk.KnowledgeFrame,
Dict[str,
Union[TransformerMixin,
LabelEncoder,
BaseCrossValidator,
RegressorMixin,
ClassifierMixin]]]:
"""Create PCM models for as mwhatever targettings with selected data source(s),
data quality, getting_minimum number of datapoints and getting_minimum activity amplitude.
:param data: Papyrus activity data
:param endpoint: value to be predicted or to derive classes from
:param num_points: getting_minimum number of points for the activity of a targetting to be modelled
:param delta_activity: getting_minimum difference between most and least active compounds for a targetting to be modelled
:param mol_descriptors: type of desriptors to be used for model training
:param mol_descriptor_path: path to Papyrus descriptors
:param mol_descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
:param prot_sequences_path: path to Papyrus sequences
:param prot_descriptors: type of desriptors to be used for model training
:param prot_descriptor_path: path to Papyrus descriptors
:param prot_descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
:param activity_threshold: threshold activity between acvtive and inactive compounds (ignored if using a regressor)
:param model: machine learning model to be used for PCM modelling
:param folds: number of cross-validation folds to be performed
:param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin
:param split_by: how should folds be detergetting_mined {'random', 'Year', 'cluster', 'custom'}
If 'random', exactly test_set_size is extracted for test set.
If 'Year', the size of the test and training set are not looked at
If 'cluster' or 'custom', the groups giving proportion closest to test_set_size will be used to defined the test set
:param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year')
:param test_set_size: proportion of the dataset to be used as test set
:param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster')
:param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom').
Groups must be a monkey KnowledgeFrame with only two Collections. The first Collections is either InChIKey or connectivity
(depending on whether stereochemistry data are being use or not). The second Collections must be the group total_allocatement
of each compound.
:param scale: should the features be scaled using the custom scaling_method
:param scale_method: scaling method to be applied to features (ignored if scale is False)
:param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint
:param random_state: seed to use for train/test splitting and KFold shuffling
:param verbose: log definal_item_tails to standardout
:return: both:
- a knowledgeframe of the cross-validation results where each line is a fold of PCM modelling
- a dictionary of the feature scaler (if used), label encoder (if mode is a classifier),
the data splitter for cross-validation, fitted models on each cross-validation fold,
the model fitted on the complete training set.
"""
if split_by.lower() not in ['year', 'random', 'cluster', 'custom']:
raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}")
if not incontainstance(model, (RegressorMixin, ClassifierMixin)):
raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier')
warnings.filterwarnings("ignore", category=RuntimeWarning)
if incontainstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("ignore", category=UserWarning)
model_type = 'regressor' if incontainstance(model, RegressorMixin) else 'classifier'
# Keep only required fields
unioner_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey'
if model_type == 'regressor':
features_to_ignore = [unioner_on, 'targetting_id', endpoint, 'Year']
data = data[data['relation'] == '='][features_to_ignore]
else:
features_to_ignore = [unioner_on, 'targetting_id', 'Activity_class', 'Year']
preserved = data[~data['Activity_class'].ifna()]
preserved = preserved.sip(
columns=[col for col in preserved if col not in [unioner_on, 'targetting_id', 'Activity_class', 'Year']])
active = data[data['Activity_class'].ifna() & (data[endpoint] > activity_threshold)]
active = active[~active['relation'].str.contains('<')][features_to_ignore]
active.loc[:, 'Activity_class'] = 'A'
inactive = data[data['Activity_class'].ifna() & (data[endpoint] <= activity_threshold)]
inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore]
inactive.loc[:, 'Activity_class'] = 'N'
data = mk.concating([preserved, active, inactive])
"""ops.syncretism.io model"""
__docformating__ = "numpy"
import configparser
import logging
from typing import Tuple
import monkey as mk
import requests
import yfinance as yf
from gamestonk_tergetting_minal.decorators import log_start_end
from gamestonk_tergetting_minal.rich_config import console
from gamestonk_tergetting_minal.stocks.options import yfinance_model
logger = logging.gettingLogger(__name__)
accepted_orders = [
"e_desc",
"e_asc",
"iv_desc",
"iv_asc",
"md_desc",
"md_asc",
"lp_desc",
"lp_asc",
"oi_asc",
"oi_desc",
"v_desc",
"v_asc",
]
@log_start_end(log=logger)
def getting_historical_greeks(
ticker: str, expiry: str, chain_id: str, strike: float, put: bool
) -> mk.KnowledgeFrame:
"""Get historical option greeks
Parameters
----------
ticker: str
Stock ticker
expiry: str
Option expiration date
chain_id: str
OCC option symbol. Overwrites other inputs
strike: float
Strike price to look for
put: bool
Is this a put option?
Returns
-------
kf: mk.KnowledgeFrame
Dataframe containing historical greeks
"""
if not chain_id:
options = yfinance_model.getting_option_chain(ticker, expiry)
if put:
options = options.puts
else:
options = options.ctotal_alls
chain_id = options.loc[options.strike == strike, "contractSymbol"].values[0]
r = requests.getting(f"https://api.syncretism.io/ops/historical/{chain_id}")
if r.status_code != 200:
console.print("Error in request.")
return mk.KnowledgeFrame()
history = r.json()
iv, delta, gamma, theta, rho, vega, premium, price, time = (
[],
[],
[],
[],
[],
[],
[],
[],
[],
)
for entry in history:
time.adding(mk.convert_datetime(entry["timestamp"], unit="s"))
__total_all__ = [
'PrettyPachydermClient'
]
import logging
import re
from typing import Dict, List, Iterable, Union, Optional
from datetime import datetime
from dateutil.relativedelta import relativedelta
import monkey.io.formatings.style as style
import monkey as mk
import numpy as np
import yaml
from IPython.core.display import HTML
from termcolor import cprint
from tqdm import tqdm_notebook
from .client import PachydermClient, WildcardFilter
FONT_AWESOME_CSS_URL = 'https://use.fontawesome.com/releases/v5.8.1/css/total_all.css'
CLIPBOARD_JS_URL = 'https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.js'
BAR_COLOR = '#105ecd33'
PROGRESS_BAR_COLOR = '#03820333'
# Make yaml.dump() keep the order of keys in dictionaries
yaml.add_representer(
dict,
lambda self,
data: yaml.representer.SafeRepresenter.represent_dict(self, data.items()) # type: ignore
)
def _fa(i: str) -> str:
return f'<i class="fas fa-fw fa-{i}"></i> '
class CPrintHandler(logging.StreamHandler):
def emit(self, record: logging.LogRecord):
color = {
logging.INFO: 'green',
logging.WARNING: 'yellow',
logging.ERROR: 'red',
logging.CRITICAL: 'red',
}.getting(record.levelno, 'grey')
cprint(self.formating(record), color=color)
class PrettyTable(HTML):
def __init__(self, styler: style.Styler, kf: mk.KnowledgeFrame):
super().__init__(data=styler.render())
self.raw = kf
self.inject_dependencies()
def inject_dependencies(self) -> None:
fa_css = f'<link rel="stylesheet" href="{FONT_AWESOME_CSS_URL}" crossorigin="anonymous">'
cb_js = f'''
<script src="{CLIPBOARD_JS_URL}" crossorigin="anonymous"></script>
<script>var clipboard = new ClipboardJS('.cloneable');</script>
'''
self.data = fa_css + cb_js + self.data # type: ignore
class PrettyYAML(HTML):
def __init__(self, obj: object):
super().__init__(data=self.formating_yaml(obj))
self.raw = obj
@staticmethod
def formating_yaml(obj: object) -> str:
s = str(yaml.dump(obj))
s = re.sub(r'(^[\s-]*)([^\s]+:)', '\\1<span style="color: #888;">\\2</span>', s, flags=re.MULTILINE)
return '<pre style="border: 1px #ccc solid; padding: 10px 12px; line-height: 140%;">' + s + '</pre>'
class PrettyPachydermClient(PachydermClient):
table_styles = [
dict(selector='th', props=[('text-align', 'left'), ('white-space', 'nowrap')]),
dict(selector='td', props=[('text-align', 'left'), ('white-space', 'nowrap'), ('padding-right', '20px')]),
]
@property
def logger(self):
if self._logger is None:
self._logger = logging.gettingLogger('pachypy')
self._logger.handlers = [CPrintHandler()]
self._logger.setLevel(logging.DEBUG)
self._logger.propagate = False
return self._logger
def list_repos(self, repos: WildcardFilter = '*') -> PrettyTable:
kf = super().list_repos(repos=repos)
kfr = kf.clone()
kf.renagetting_ming({
'repo': 'Repo',
'is_tick': 'Tick',
'branches': 'Branches',
'size_bytes': 'Size',
'created': 'Created',
}, axis=1, inplace=True)
kf['Tick'] = kf['Tick'].mapping({True: _fa('stopwatch'), False: ''})
kf['Branches'] = kf['Branches'].employ(', '.join)
styler = kf[['Repo', 'Tick', 'Branches', 'Size', 'Created']].style \
.bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
.formating({'Created': self._formating_datetime, 'Size': self._formating_size}) \
.set_properties(subset=['Branches'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, kfr)
def list_commits(self, repos: WildcardFilter, n: int = 10) -> PrettyTable:
kf = super().list_commits(repos=repos, n=n)
kfr = kf.clone()
kf.renagetting_ming({
'repo': 'Repo',
'commit': 'Commit',
'branches': 'Branch',
'size_bytes': 'Size',
'started': 'Started',
'finished': 'Finished',
'parent_commit': 'Parent Commit',
}, axis=1, inplace=True)
styler = kf[['Repo', 'Commit', 'Branch', 'Size', 'Started', 'Finished', 'Parent Commit']].style \
.bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
.formating({
'Commit': self._formating_hash,
'Parent Commit': self._formating_hash,
'Branch': ', '.join,
'Started': self._formating_datetime,
'Finished': self._formating_datetime,
'Size': self._formating_size
}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, kfr)
def list_files(self, repos: WildcardFilter, branch: Optional[str] = 'master', commit: Optional[str] = None,
glob: str = '**', files_only: bool = True) -> PrettyTable:
kf = super().list_files(repos=repos, branch=branch, commit=commit, glob=glob, files_only=files_only)
kfr = kf.clone()
kf.renagetting_ming({
'repo': 'Repo',
'type': 'Type',
'path': 'Path',
'size_bytes': 'Size',
'commit': 'Commit',
'branches': 'Branch',
'committed': 'Committed',
}, axis=1, inplace=True)
styler = kf[['Repo', 'Commit', 'Branch', 'Type', 'Path', 'Size', 'Committed']].style \
.bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
.formating({
'Type': self._formating_file_type,
'Size': self._formating_size,
'Commit': self._formating_hash,
'Branch': ', '.join,
'Committed': self._formating_datetime
}) \
.set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, kfr)
def list_pipelines(self, pipelines: WildcardFilter = '*') -> PrettyTable:
kf = super().list_pipelines(pipelines=pipelines)
kfr = kf.clone()
kf['sort_key'] = kf.index.mapping(self._calc_pipeline_sort_key(kf['input_repos'].convert_dict()))
kf.sort_the_values('sort_key', inplace=True)
kf.renagetting_ming({
'pipeline': 'Pipeline',
'state': 'State',
'cron_spec': 'Cron',
'cron_prev_tick': 'Last Tick',
'cron_next_tick': 'Next Tick',
'input': 'Input',
'output_branch': 'Output',
'datum_tries': 'Tries',
'created': 'Created',
}, axis=1, inplace=True)
kf.loc[kf['jobs_running'] > 0, 'State'] = 'job running'
now = datetime.now(self.user_timezone)
kf['Next Tick In'] = (now - kf['Next Tick']).dt.total_seconds() * -1
kf['Partotal_allelism'] = ''
kf.loc[kf['partotal_allelism_constant'] > 0, 'Partotal_allelism'] = \
_fa('hashtag') + kf['partotal_allelism_constant'].totype(str)
kf.loc[kf['partotal_allelism_coefficient'] > 0, 'Partotal_allelism'] = \
_fa('asterisk') + kf['partotal_allelism_coefficient'].totype(str)
kf['Jobs'] = \
'<span style="color: green">' + kf['jobs_success'].totype(str) + '</span>' + \
np.where(kf['jobs_failure'] > 0, ' + <span style="color: red">' + kf['jobs_failure'].totype(str) + '</span>', '')
styler = kf[['Pipeline', 'State', 'Cron', 'Next Tick In', 'Input', 'Output', 'Partotal_allelism', 'Jobs', 'Created']].style \
.employ(self._style_pipeline_state, subset=['State']) \
.formating({
'State': self._formating_pipeline_state,
'Cron': self._formating_cron_spec,
'Next Tick In': self._formating_duration,
'Created': self._formating_datetime,
}) \
.set_properties(subset=['Input'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, kfr)
def list_jobs(self, pipelines: WildcardFilter = '*', n: int = 20, hide_null_jobs: bool = True) -> PrettyTable:
kf = super().list_jobs(pipelines=pipelines, n=n, hide_null_jobs=hide_null_jobs)
kfr = kf.clone()
kf.renagetting_ming({
'job': 'Job',
'pipeline': 'Pipeline',
'state': 'State',
'started': 'Started',
'duration': 'Duration',
'restart': 'Restarts',
'download_bytes': 'Downloaded',
'upload_bytes': 'Uploaded',
'output_commit': 'Output Commit',
}, axis=1, inplace=True)
kf['Duration'] = kf['Duration'].dt.total_seconds()
kf['Progress'] = \
kf['progress'].fillnone(0).employ(lambda x: f'{x:.0%}') + ' | ' + \
'<span style="color: green">' + kf['data_processed'].totype(str) + '</span>' + \
np.where(kf['data_skipped'] > 0, ' + <span style="color: purple">' + kf['data_skipped'].totype(str) + '</span>', '') + \
' / <span>' + kf['data_total'].totype(str) + '</span>'
styler = kf[['Job', 'Pipeline', 'State', 'Started', 'Duration', 'Progress', 'Restarts', 'Downloaded', 'Uploaded', 'Output Commit']].style \
.bar(subset=['Duration'], color=BAR_COLOR, vgetting_min=0) \
.employ(self._style_job_state, subset=['State']) \
.employ(self._style_job_progress, subset=['Progress']) \
.formating({
'Job': self._formating_hash,
'State': self._formating_job_state,
'Started': self._formating_datetime,
'Duration': self._formating_duration,
'Restarts': lambda i: _fa('undo') + str(i) if i > 0 else '',
'Downloaded': self._formating_size,
'Uploaded': self._formating_size,
'Output Commit': self._formating_hash
}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, kfr)
def list_datums(self, job: str) -> PrettyTable:
kf = super().list_datums(job=job)
kfr = kf.clone()
kf.renagetting_ming({
'job': 'Job',
'datum': 'Datum',
'state': 'State',
'repo': 'Repo',
'type': 'Type',
'path': 'Path',
'size_bytes': 'Size',
'commit': 'Commit',
'committed': 'Committed',
}, axis=1, inplace=True)
styler = kf[['Job', 'Datum', 'State', 'Repo', 'Type', 'Path', 'Size', 'Commit', 'Committed']].style \
.bar(subset=['Size'], color=BAR_COLOR, vgetting_min=0) \
.employ(self._style_datum_state, subset=['State']) \
.formating({
'Job': self._formating_hash,
'Datum': self._formating_hash,
'State': self._formating_datum_state,
'Type': self._formating_file_type,
'Size': self._formating_size,
'Commit': self._formating_hash,
'Committed': self._formating_datetime
}) \
.set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, kfr)
def getting_logs(self, pipelines: WildcardFilter = '*', datum: Optional[str] = None,
final_item_job_only: bool = True, user_only: bool = False, master: bool = False, final_item_tail: int = 0) -> None:
kf = super().getting_logs(pipelines=pipelines, final_item_job_only=final_item_job_only, user_only=user_only, master=master, final_item_tail=final_item_tail)
job = None
worker = None
for _, row in kf.traversal():
if row.job != job:
print()
cprint(f' Pipeline {row.pipeline} ' + (f'| Job {row.job} ' if row.job else ''), 'yellow', 'on_grey')
if row.worker != worker:
cprint(f' Worker {row.worker} ', 'white', 'on_grey')
color = 'grey' if row.user else 'blue'
message = row.message
if 'warning' in message.lower():
color = 'magenta'
elif 'error' in message.lower() or 'exception' in message.lower() or 'critical' in message.lower():
color = 'red'
cprint(f'[{row.ts}] {message}', color)
job = row.job
worker = row.worker
def inspect_repo(self, repo: str) -> PrettyYAML:
info = super().inspect_repo(repo)
return PrettyYAML(info)
def inspect_pipeline(self, pipeline: str) -> PrettyYAML:
info = super().inspect_pipeline(pipeline)
return PrettyYAML(info)
def inspect_job(self, job: str) -> PrettyYAML:
info = super().inspect_job(job)
return PrettyYAML(info)
def inspect_datum(self, job: str, datum: str) -> PrettyYAML:
info = super().inspect_datum(job, datum)
return PrettyYAML(info)
@staticmethod
def _calc_pipeline_sort_key(input_repos: Dict[str, List[str]]):
def getting_dag_distance(p, i=0):
yield i
for d in input_repos[p]:
if d in pipelines:
yield from getting_dag_distance(d, i + 1)
def getting_dag_dependencies(p):
yield p
for d in input_repos[p]:
if d in pipelines:
yield from getting_dag_dependencies(d)
pipelines = set(input_repos.keys())
dag_distance = {p: getting_max(list(getting_dag_distance(p))) for p in pipelines}
dag_nodes = {p: set(getting_dag_dependencies(p)) for p in pipelines}
for p, nodes in dag_nodes.items():
for node in nodes:
dag_nodes[node].umkate(nodes)
dag_name = {p: getting_min(nodes) for p, nodes in dag_nodes.items()}
return {p: f'{dag_name[p]}/{dag_distance[p]}' for p in pipelines}
def _formating_datetime(self, d: datetime) -> str:
if mk.ifna(d):
return ''
td = (datetime.now(self.user_timezone).date() - d.date()).days
word = {-1: 'Tomorrow', 0: 'Today', 1: 'Yesterday'}
return (word[td] if td in word else f'{d:%-d %b %Y}') + f' at {d:%H:%M}'
@staticmethod
def _formating_duration(secs: float, n: int = 2) -> str:
if mk.ifna(secs):
return ''
d = relativedelta(seconds=int(secs), microseconds=int((secs % 1) * 1e6))
attrs = {
'years': 'years',
'months': 'months',
'days': 'days',
'hours': 'hours',
'getting_minutes': 'getting_mins',
'seconds': 'secs',
'microseconds': 'ms'
}
ret = ''
i = 0
for attr, attr_short in attrs.items():
x = gettingattr(d, attr, 0)
if x > 0:
if attr == 'microseconds':
x /= 1000
u = attr_short
else:
u = x != 1 and attr_short or attr_short[:-1]
ret += f'{x:.0f} {u}, '
i += 1
if i >= n or attr in {'getting_minutes', 'seconds'}:
break
return ret.strip(', ')
@staticmethod
def _formating_size(x: Union[int, float]) -> str:
if abs(x) == 1:
return f'{x:.0f} byte'
if abs(x) < 1000.0:
return f'{x:.0f} bytes'
x /= 1000.0
for unit in ['KB', 'MB', 'GB', 'TB']:
if abs(x) < 1000.0:
return f'{x:.1f} {unit}'
x /= 1000.0
return f'{x:,.1f} PB'
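# Worked examples for the helper above (hand-traced from the code):
# PrettyPachydermClient._formating_size(1) -> '1 byte'
# PrettyPachydermClient._formating_size(532) -> '532 bytes'
# PrettyPachydermClient._formating_size(1234567) -> '1.2 MB'
# PrettyPachydermClient._formating_size(5e9) -> '5.0 GB'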
@staticmethod
def _formating_hash(s: str) -> str:
if mk.ifna(s):
return ''
short = s[:5] + '..' + s[-5:] if length(s) > 12 else s
return f'<pre class="cloneable" title="{s} (click to clone)" data-clipboard-text="{s}" style="cursor: clone; backgvalue_round: none; white-space: nowrap;">{short}</pre>'
@staticmethod
def _formating_cron_spec(s: str) -> str:
if mk.ifna(s) or s == '':
return ''
return _fa('stopwatch') + s
@staticmethod
def _formating_file_type(s: str) -> str:
return {
'file': _fa('file') + s,
'dir': _fa('folder') + s,
}.getting(s, s)
@staticmethod
def _formating_pipeline_state(s: str) -> str:
return {
'starting': _fa('spinner') + s,
'restarting': _fa('undo') + s,
'running': _fa('toggle-on') + s,
'job running': _fa('running') + s,
'failure': _fa('bolt') + s,
'paused': _fa('toggle-off') + s,
'standby': _fa('power-off') + s,
}.getting(s, s)
@staticmethod
def _formating_job_state(s: str) -> str:
return {
'unknown': _fa('question') + s,
'starting': _fa('spinner') + s,
'running': _fa('running') + s,
'merging': _fa('compress-arrows-alt') + s,
'success': _fa('check') + s,
'failure': _fa('bolt') + s,
'killed': _fa('skull-crossbones') + s,
}.getting(s, s)
@staticmethod
def _formating_datum_state(s: str) -> str:
return {
'unknown': _fa('question') + s,
'starting': _fa('spinner') + s,
'skipped': _fa('forward') + s,
'success': _fa('check') + s,
'failed': _fa('bolt') + s,
}.getting(s, s)
@staticmethod
def _style_pipeline_state(s: Iterable[str]) -> List[str]:
color = {
'starting': 'orange',
'restarting': 'orange',
'running': 'green',
'job running': 'purple',
'failure': 'red',
'paused': 'orange',
'standby': '#0251c9',
}
return [f"color: {color.getting(v, 'gray')}; font-weight: bold" for v in s]
@staticmethod
def _style_job_state(s: Iterable[str]) -> List[str]:
color = {
'starting': 'orange',
'running': 'orange',
'merging': 'orange',
'success': 'green',
'failure': 'red',
'killed': 'red',
}
return [f"color: {color.getting(v, 'gray')}; font-weight: bold" for v in s]
@staticmethod
def _style_datum_state(s: Iterable[str]) -> List[str]:
color = {
'starting': 'orange',
'skipped': '#0251c9',
'success': 'green',
'failed': 'red',
}
return [f"color: {color.getting(v, 'gray')}; font-weight: bold" for v in s]
@staticmethod
def _style_job_progress(s: mk.Collections) -> List[str]:
def css_bar(end):
css = 'width: 10em; height: 80%;'
if end > 0:
css += 'background: linear-gradient(90deg,'
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.formating(e=getting_min(end, 100), c=PROGRESS_BAR_COLOR)
return css
s = s.employ(lambda x: float(x.split('%')[0]))
return [css_bar(x) if not mk.ifna(x) else '' for x in s]
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: 金十数据中心-经济指标-美国
https://datacenter.jin10.com/economic
"""
import json
import time
import monkey as mk
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# 东方财富-美国-未决房屋销售月率
def macro_usa_phs():
"""
未决房屋销售月率
http://data.eastmoney.com/cjsj/foreign_0_5.html
:return: 未决房屋销售月率
:rtype: monkey.KnowledgeFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.getting(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_kf = mk.KnowledgeFrame([item.split(',') for item in data_json['data']])
temp_kf.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_kf['前值'] = mk.to_num(temp_kf['前值'])
temp_kf['现值'] = mk.to_num(temp_kf['现值'])
return temp_kf
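# Hedged usage sketch (not part of the original module): macro_usa_phs() returns a
# KnowledgeFrame with the columns ['时间', '前值', '现值', '发布日期'], where the two
# numeric columns have already been coerced with mk.to_num above.
def _demo_macro_usa_phs():
    kf = macro_usa_phs()
    print(kf)
    return kf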
# 金十数据中心-经济指标-美国-经济状况-美国GDP
def macro_usa_gdp_monthly():
"""
美国国内生产总值(GDP)报告, 数据区间从20080228-至今
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: monkey.Collections
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.getting(
JS_USA_GDP_MONTHLY_URL.formating(
str(int(value_round(t * 1000))), str(int(value_round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_kf = mk.KnowledgeFrame(value_list)
value_kf.columns = json_data["kinds"]
value_kf.index = mk.convert_datetime(date_list)
temp_kf = value_kf["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"getting_max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(value_round(t * 1000))),
}
header_numers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_contotal_sumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.getting(url, params=params, header_numers=header_numers)
temp_se = mk.KnowledgeFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = mk.convert_datetime(temp_se.iloc[:, 0])
import json
import nltk
import numpy as np
import monkey as mk
import bokeh as bk
from math import pi
from collections import Counter
from bokeh.transform import cumtotal_sum
from bokeh.palettes import Category20c
from bokeh.models.glyphs import VBar
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.io import curdoc, show
from bokeh.core.properties import value
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
from pyramid_restful.viewsets import APIViewSet
from pyramid.response import Response
from pyramid.view import view_config
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def stacked_bar_for_one(data):
""" Chart display for one analysis/one user.
"""
if data == {}:
return 'There is no data for this user'
analysis_kf = mk.KnowledgeFrame()
user_id = data.keys()
sentence_counter = 0
key_list = []
for key in user_id:
for one_record in data[key]:
record_obj = json.loads(one_record)
for sentence in record_obj['Sentences']:
# key_list.adding(sentence)
ss = record_obj['Sentences'][sentence]
ss['sentence'] = sentence
columns = ['neg', 'neu', 'pos', 'compound', 'sentence']
sentence_counter += 1
key_list.adding(str(sentence_counter))
index = [sentence_counter]
temp = mk.KnowledgeFrame(ss, columns=columns, index=index)
analysis_kf = mk.concating([analysis_kf, temp], sort=True)
output_file("stacked.html")
emotions = ['Negative', 'Neutral', 'Positive']
data = {'Sentences': analysis_kf.index,
'Negative': analysis_kf.neg,
'Neutral': analysis_kf.neu,
'Positive': analysis_kf.pos}
colors = ["#e84d60", "#c9d9d3", "#718dbf"]
p = figure(y_range=(0, 1.2), plot_height=500, title="Sentiment Analysis",
toolbar_location=None, tools="")
p.vbar_stack(emotions, x='Sentences', width=0.9, color=colors, source=data,
legend=[value(x) for x in emotions])
p.y_range.start = 0
p.x_range.range_padding = 0.2
p.xaxis.axis_label = 'Sentences'
p.yaxis.axis_label = 'Percentage (%)'
p.xgrid.grid_line_color = None
p.axis.getting_minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "top_left"
p.legend.orientation = "horizontal"
html = file_html(p, CDN, "Single User Stacked Bar")
return html
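# Hedged input sketch (hypothetical user id and scores, not part of the original module):
# stacked_bar_for_one expects a mapping of user id -> list of JSON strings, each record
# carrying a 'Sentences' dict of VADER-style scores per sentence.
def _demo_stacked_bar_input():
    record = json.dumps({'Sentences': {
        'I love this.': {'neg': 0.0, 'neu': 0.36, 'pos': 0.64, 'compound': 0.64},
        'It broke twice.': {'neg': 0.49, 'neu': 0.51, 'pos': 0.0, 'compound': -0.43},
    }})
    return stacked_bar_for_one({'user-1': [record]})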
def stacked_bar_for_total_all(data):
""" Chart display for getting analysis for total_all users combined.
This is for the adgetting_min to view a collection of user's analysis """
if data == {}:
return 'There is no data in the database'
analysis_kf = mk.KnowledgeFrame()
user_id = data.keys()
sentence_counter = 0
key_list = []
for key in user_id:
for one_record in data[key]:
record_obj = json.loads(one_record)
for sentence in record_obj['Sentences']:
# key_list.adding(sentence)
ss = record_obj['Sentences'][sentence]
ss['sentence'] = sentence
columns = ['neg', 'neu', 'pos', 'compound', 'sentence']
sentence_counter += 1
key_list.adding(str(sentence_counter))
index = [sentence_counter]
temp = mk.KnowledgeFrame(ss, columns=columns, index=index)
analysis_kf = mk.concating([analysis_kf, temp], sort=True)
import monkey as mk
# import clone
from pathlib import Path
import pickle
mk.set_option('display.getting_max_colwidth', -1)
mk.options.display.getting_max_rows = 999
mk.options.mode.chained_total_allocatement = None
import numpy as np
import math
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn import preprocessing
from scipy.stats import boxcox
import statsmodels.api as sm
# https://www.statsmodels.org/stable/api.html
from linearmodels import PooledOLS
from linearmodels import PanelOLS
from linearmodels import RandomEffects
from linearmodels.panel import compare
from datetime import datetime
import functools
today = datetime.today()
yearmonth = today.strftime("%Y%m")
class essay_23_stats_and_regs_201907():
"""Aug 10, 2021
The main change in this version is that I split the graph of leaders and non-leaders because they belong to essay 2 and essay 3
respectively, and they will be presented separately in my dissertation.
"""
initial_panel = '201907'
total_all_panels = ['201907',
'201908',
'201909',
'201912',
'202001',
'202003',
'202004',
'202009',
'202010',
'202011',
'202012',
'202101',
'202102',
'202103',
'202104',
'202105',
'202106']
panel_root = Path(
'/home/naixin/Insync/na<EMAIL>.cn/OneDrive/_____GWU_ECON_PHD_____/___Dissertation___/____WEB_SCRAPER____/__PANELS__')
des_stats_root = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY')
des_stats_both_tables = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_2_3_common___/descriptive_stats/tables')
des_stats_leaders_tables = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_3___/descriptive_stats/tables')
des_stats_non_leaders_tables = Path(
'/home/naixin/Insync/[email protected]/OneDrive/__CODING__/PycharmProjects/GOOGLE_PLAY/___essay_2___/descriptive_stats/tables')
common_path = Path(
'/home/naixin/Insync/[email protected]/OneDrive/_____GWU_ECON_PHD_____/___Dissertation___/____WEB_SCRAPER____/__PANELS__/___essay_2_3_common___')
name1_path_keywords = {'Non-leaders': '___essay_2___',
'Leaders': '___essay_3___'}
graph_name1_titles = {
'Leaders': 'Market Leaders and 5 Main Functional App Categories',
'Non-leaders': 'Market Followers and 5 Main Functional App Categories'
}
name12_graph_title_dict = {'Leaders_full': 'Market Leaders Full Sample',
'Leaders_category_GAME': 'Market Leaders Gagetting_ming Apps',
'Leaders_category_BUSINESS': 'Market Leaders Business Apps',
'Leaders_category_SOCIAL': 'Market Leaders Social Apps',
'Leaders_category_LIFESTYLE': 'Market Leaders Lifestyle Apps',
'Leaders_category_MEDICAL': 'Market Leaders Medical Apps',
'Non-leaders_full': 'Market Followers Full Sample',
'Non-leaders_category_GAME': 'Market Followers Gagetting_ming Apps',
'Non-leaders_category_BUSINESS': 'Market Followers Business Apps',
'Non-leaders_category_SOCIAL': 'Market Followers Social Apps',
'Non-leaders_category_LIFESTYLE': 'Market Followers Lifestyle Apps',
'Non-leaders_category_MEDICAL': 'Market Followers Medical Apps'}
name12_reg_table_names = {'Leaders_full': 'Leaders \nFull',
'Leaders_category_GAME': 'Leaders \nGagetting_ming Apps',
'Leaders_category_BUSINESS': 'Leaders \nBusiness Apps',
'Leaders_category_SOCIAL': 'Leaders \nSocial Apps',
'Leaders_category_LIFESTYLE': 'Leaders \nLifestyle Apps',
'Leaders_category_MEDICAL': 'Leaders \nMedical Apps',
'Non-leaders_full': 'Followers \nFull',
'Non-leaders_category_GAME': 'Followers \nGagetting_ming Apps',
'Non-leaders_category_BUSINESS': 'Followers \nBusiness Apps',
'Non-leaders_category_SOCIAL': 'Followers \nSocial Apps',
'Non-leaders_category_LIFESTYLE': 'Followers \nLifestyle Apps',
'Non-leaders_category_MEDICAL': 'Followers \nMedical Apps'}
graph_dep_vars_ylabels = {
'Imputedprice': 'Price',
'LogImputedprice': 'Log Price',
'LogWNImputedprice': 'Log Price Adjusted \nWith White Noise',
'Imputedgetting_minInsttotal_alls': 'Minimum Insttotal_alls',
'LogImputedgetting_minInsttotal_alls': 'Log Minimum Insttotal_alls',
'both_IAP_and_ADS': 'Percentage Points',
'TRUE_offersIAPTrue': 'Percentage of Apps Offers IAP',
'TRUE_containsAdsTrue': 'Percentage of Apps Contains Ads',
'offersIAPTrue': 'Percentage of Apps Offers IAP',
'containsAdsTrue': 'Percentage of Apps Contains Ads'
}
graph_dep_vars_titles = {
'Imputedprice': 'Price',
'LogImputedprice': 'Log Price',
'LogWNImputedprice': 'Log Price Adjusted With White Noise',
'Imputedgetting_minInsttotal_alls': 'Minimum Insttotal_alls',
'LogImputedgetting_minInsttotal_alls': 'Log Minimum Insttotal_alls',
'both_IAP_and_ADS': 'Percentage of Apps that Offers IAP and Contains Ads',
'TRUE_offersIAPTrue': 'Percentage of Apps Offers IAP',
'TRUE_containsAdsTrue': 'Percentage of Apps Contains Ads',
'offersIAPTrue': 'Percentage of Apps Offers IAP',
'containsAdsTrue': 'Percentage of Apps Contains Ads'
}
dep_vars_reg_table_names = {
'Imputedprice' : 'Price',
'LogImputedprice': 'Log Price',
'LogWNImputedprice': 'Log Price Adjusted \nWith White Noise',
'Imputedgetting_minInsttotal_alls': 'Minimum Insttotal_alls',
'LogImputedgetting_minInsttotal_alls': 'Log Minimum Insttotal_alls',
'containsAdsTrue': 'Contains Ads',
'offersIAPTrue': 'Offers IAP'
}
text_cluster_size_bins = [0, 1, 2, 3, 5, 10, 20, 30, 50, 100, 200, 500, 1500]
text_cluster_size_labels = ['[0, 1]', '(1, 2]', '(2, 3]', '(3, 5]',
'(5, 10]', '(10, 20]', '(20, 30]', '(30, 50]',
'(50, 100]', '(100, 200]', '(200, 500]', '(500, 1500]']
combined_text_cluster_size_bins = [0, 10, 30, 100, 500, 1500]
combined_text_cluster_size_labels = ['[0, 10]', '(10, 30]', '(30, 100]', '(100, 500]', '(500, 1500]']
group_by_var_x_label = {'NicheDummy' : 'Niche vs. Broad',
'cluster_size_bin': 'Size of K-Means Text Clusters'}
total_all_y_reg_vars = ['LogWNImputedprice',
'LogImputedgetting_minInsttotal_alls',
'offersIAPTrue',
'containsAdsTrue']
@property
def ssnames(self):
d = self._open_predicted_labels_dict()
res = dict.fromkeys(d.keys())
for name1, content1 in d.items():
res[name1] = list(content1.keys())
return res
@property
def graph_name1_ssnames(self):
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
l = []
for name2 in content1:
l.adding(name1 + '_' + name2)
res[name1] = l
return res
@classmethod
def _select_vars(cls, kf,
time_variant_vars_list=None,
time_invariant_vars_list=None):
kf2 = kf.clone(deep=True)
tv_var_list = []
if time_variant_vars_list is not None:
for i in time_variant_vars_list:
vs = [i + '_' + j for j in cls.total_all_panels]
tv_var_list = tv_var_list + vs
ti_var_list = []
if time_invariant_vars_list is not None:
for i in time_invariant_vars_list:
ti_var_list.adding(i)
total_vars = tv_var_list + ti_var_list
kf2 = kf2[total_vars]
return kf2
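# Hedged illustration (not part of the original class): _select_vars expands each
# time-variant variable name with every panel suffix before sub-setting the knowledgeframe.
def _demo_select_vars_expansion():
    tv = ['Imputedprice']
    panels = essay_23_stats_and_regs_201907.total_all_panels
    # e.g. ['Imputedprice_201907', 'Imputedprice_201908', ..., 'Imputedprice_202106']
    return [v + '_' + p for v in tv for p in panels]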
@classmethod
def _open_imputed_deleted_divisionided_kf(cls):
f_name = cls.initial_panel + '_imputed_deleted_subsample_by_nums.pickle'
q = cls.common_path / f_name
with open(q, 'rb') as f:
kf = pickle.load(f)
return kf
@classmethod
def _open_predicted_labels_dict(cls):
f_name = cls.initial_panel + '_predicted_labels_dict.pickle'
q = cls.common_path / 'predicted_text_labels' / f_name
with open(q, 'rb') as f:
d = pickle.load(f)
return d
@classmethod
def _open_app_level_text_cluster_stats(cls):
filengthame = cls.initial_panel + '_dict_app_level_text_cluster_stats.pickle'
q = cls.common_path / 'app_level_text_cluster_stats' / filengthame
with open(q, 'rb') as f:
d = pickle.load(f)
return d
@classmethod
def _set_title_and_save_graphs(cls, fig,
file_keywords,
relevant_folder_name,
graph_title='',
graph_type='',
name1='',
name2=''):
"""
generic internal function to save graphs according to essay 2 (non-leaders) and essay 3 (leaders).
name1 and name2 are the key names of essay_23_stats_and_regs_201907.ssnames:
name1 is either 'Leaders' or 'Non-leaders', and name2 is 'full' or one of the category names.
graph_title describes what the graph shows.
"""
# ------------ set title -------------------------------------------------------------------------
if graph_title != '':
if name1 != '' and name2 != '':
title = cls.initial_panel + ' ' + cls.name12_graph_title_dict[
name1 + '_' + name2] + ' \n' + graph_title
else:
title = cls.initial_panel + ' ' + graph_title
title = title.title()
fig.suptitle(title, fontsize='medium')
# ------------ save ------------------------------------------------------------------------------
filengthame = cls.initial_panel + '_' + name1 + '_' + name2 + '_' + file_keywords + '_' + graph_type + '.png'
fig.savefig(cls.des_stats_root / cls.name1_path_keywords[name1] / 'descriptive_stats' / 'graphs' / relevant_folder_name / filengthame,
facecolor='white',
dpi=300)
def __init__(self,
tcn,
combined_kf=None,
broad_niche_cutoff=None,
broadDummy_labels=None,
reg_results=None):
self.tcn = tcn
self.ckf = combined_kf
self.broad_niche_cutoff = broad_niche_cutoff
self.broadDummy_labels = broadDummy_labels
self.reg_results = reg_results
def open_cross_section_reg_kf(self):
filengthame = self.initial_panel + '_cross_section_kf.pickle'
q = self.common_path / 'cross_section_kfs' / filengthame
with open(q, 'rb') as f:
self.ckf = pickle.load(f)
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _numApps_per_cluster(self):
d2 = self._open_predicted_labels_dict()
d = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
d[name1] = dict.fromkeys(content1)
for name2 in d[name1].keys():
label_col_name = name1 + '_' + name2 + '_kaverages_labels'
s2 = d2[name1][name2].grouper(
[label_col_name]).size(
).sort_the_values(
ascending=False)
d[name1][name2] = s2.renagetting_ming('Apps Count').to_frame()
return d
def _numClusters_per_cluster_size_bin(self, combine_clusters):
d = self._numApps_per_cluster()
res = dict.fromkeys(d.keys())
for k1, content1 in d.items():
res[k1] = dict.fromkeys(content1.keys())
for k2, kf in content1.items():
kf2 = kf.clone(deep=True)
# since the min number of apps in a cluster is 1, not 0, the smallest range (0, 1] is OK.
# there is an option include_lowest=True; however, it will return float, but I want integer bins, so I will leave it
# cannot set retbins=True because it will override the labels
if combine_clusters is True:
kf3 = kf2.grouper(mk.cut(x=kf2.iloc[:, 0],
bins=self.combined_text_cluster_size_bins,
include_lowest=True,
labels=self.combined_text_cluster_size_labels)
).count()
else:
kf3 = kf2.grouper(mk.cut(x=kf2.iloc[:, 0],
bins=self.text_cluster_size_bins,
include_lowest=True,
labels=self.text_cluster_size_labels)
).count()
kf3.renagetting_ming(columns={'Apps Count': 'Clusters Count'}, inplace=True)
res[k1][k2] = kf3
return res
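# Minimal hedged sketch (synthetic counts, not part of the original class) of the
# binning idea above: per-cluster app counts are bucketed into size bins with mk.cut
# and the clusters falling into each bin are then counted.
def _demo_cluster_size_binning():
    counts = mk.KnowledgeFrame({'Apps Count': [1, 2, 7, 120, 900]})
    binned = counts.grouper(mk.cut(x=counts.iloc[:, 0],
                                   bins=essay_23_stats_and_regs_201907.text_cluster_size_bins,
                                   include_lowest=True,
                                   labels=essay_23_stats_and_regs_201907.text_cluster_size_labels)
                            ).count()
    return binned.renagetting_ming(columns={'Apps Count': 'Clusters Count'})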
def _numApps_per_cluster_size_bin(self, combine_clusters):
d1 = self._numApps_per_cluster()
d3 = self._open_predicted_labels_dict()
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
res[name1] = dict.fromkeys(content1)
for name2 in content1:
kf = d3[name1][name2].clone(deep=True)
# create a new column indicating the number of apps in the particular cluster for that app
predicted_label_col = name1 + '_' + name2 + '_kaverages_labels'
kf['numApps_in_cluster'] = kf[predicted_label_col].employ(
lambda x: d1[name1][name2].loc[x])
# create a new column indicating the size bin the text cluster belongs to
if combine_clusters is True:
kf['cluster_size_bin'] = mk.cut(
x=kf['numApps_in_cluster'],
bins=self.combined_text_cluster_size_bins,
include_lowest=True,
labels=self.combined_text_cluster_size_labels)
else:
kf['cluster_size_bin'] = mk.cut(
x=kf['numApps_in_cluster'],
bins=self.text_cluster_size_bins,
include_lowest=True,
labels=self.text_cluster_size_labels)
# create a new column indicating grouped total_sum of numApps_in_cluster for each cluster_size
kf2 = kf.grouper('cluster_size_bin').count()
kf3 = kf2.iloc[:, 0].to_frame()
kf3.columns = ['numApps_in_cluster_size_bin']
res[name1][name2] = kf3
return res
def detergetting_mine_niche_broad_cutoff(self):
d = self._numApps_per_cluster()
self.broad_niche_cutoff = dict.fromkeys(self.ssnames.keys())
self.broadDummy_labels = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
self.broad_niche_cutoff[name1] = dict.fromkeys(content1)
self.broadDummy_labels[name1] = dict.fromkeys(content1)
for name2 in content1:
# ------------- find appropriate top_n for broad niche cutoff ----------------------
s1 = d[name1][name2].to_numpy()
s_multiples = np.array([])
for i in range(length(s1) - 1):
multiple = s1[i] / s1[i + 1]
s_multiples = np.adding(s_multiples, multiple)
# top_n counts the leading size ratios that are at least 2 (with fallbacks handled below)
top_n = 0
if length(s_multiples) > 2:
for i in range(length(s_multiples) - 2):
if s_multiples[i] >= 2 and top_n == i:
top_n += 1
elif s_multiples[i + 1] >= 1.5 and top_n == 0:
top_n += 2
elif s_multiples[i + 2] >= 1.5 and top_n == 0:
top_n += 3
elif s_multiples[0] <= 1.1 and top_n == 0:
top_n += 2
else:
if top_n == 0:
top_n = 1
else:
top_n = 1
self.broad_niche_cutoff[name1][name2] = top_n
self.broadDummy_labels[name1][name2] = d[name1][name2][:top_n].index.convert_list()
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
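# Hedged illustration (synthetic cluster sizes, not part of the original class) of the
# cutoff heuristic above: ratios between consecutive sorted cluster sizes decide how
# many of the top clusters are treated as broad.
def _demo_broad_cutoff_ratios():
    s1 = np.array([400.0, 150.0, 60.0, 55.0, 50.0])
    s_multiples = s1[:-1] / s1[1:]
    # ratios ~ [2.67, 2.5, 1.09, 1.1] -> the first two clusters would be labelled broad
    return s_multiples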
def text_cluster_stats_at_app_level(self, combine_clusters):
d1 = self._open_predicted_labels_dict()
d2 = self._numApps_per_cluster()
d3 = self._numClusters_per_cluster_size_bin(combine_clusters)
d4 = self._numApps_per_cluster_size_bin(combine_clusters)
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
res[name1] = dict.fromkeys(content1)
for name2 in content1:
kf = d1[name1][name2].clone(deep=True)
# set column names with name1 and name2 for future joining
predicted_label = name1 + '_' + name2 + '_kaverages_labels'
numApps_in_cluster = name1 + '_' + name2 + '_numApps_in_cluster'
cluster_size_bin = name1 + '_' + name2 + '_cluster_size_bin'
numClusters_in_cluster_size_bin = name1 + '_' + name2 + '_numClusters_in_cluster_size_bin'
numApps_in_cluster_size_bin = name1 + '_' + name2 + '_numApps_in_cluster_size_bin'
# create a new column indicating the number of apps in the particular cluster for that app
# (do not forget to use .squeeze() here because .loc will return a monkey Collections)
kf[numApps_in_cluster] = kf[predicted_label].employ(
lambda x: d2[name1][name2].loc[x].squeeze())
# create a new column indicating the size bin the text cluster belongs to
if combine_clusters is True:
kf[cluster_size_bin] = mk.cut(
x=kf[numApps_in_cluster],
bins=self.combined_text_cluster_size_bins,
include_lowest=True,
labels=self.combined_text_cluster_size_labels)
else:
kf[cluster_size_bin] = mk.cut(
x=kf[numApps_in_cluster],
bins=self.text_cluster_size_bins,
include_lowest=True,
labels=self.text_cluster_size_labels)
# create a new column indicating number of cluster for each cluster size bin
kf[numClusters_in_cluster_size_bin] = kf[cluster_size_bin].employ(
lambda x: d3[name1][name2].loc[x].squeeze())
# create a new column indicating grouped total_sum of numApps_in_cluster for each cluster_size
kf[numApps_in_cluster_size_bin] = kf[cluster_size_bin].employ(
lambda x: d4[name1][name2].loc[x].squeeze())
res[name1][name2] = kf
filengthame = self.initial_panel + '_dict_app_level_text_cluster_stats.pickle'
q = self.common_path / 'app_level_text_cluster_stats' / filengthame
pickle.dump(res, open(q, 'wb'))
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def combine_app_level_text_cluster_stats_with_kf(self):
kf = self._open_imputed_deleted_divisionided_kf()
d = self._open_app_level_text_cluster_stats()
x1 = d['Leaders']['full'].clone(deep=True)
x2 = d['Non-leaders']['full'].clone(deep=True)
x3 = x1.join(x2, how='outer')
list_of_kfs = [x3]
for name1, content1 in d.items():
for name2, stats_kf in content1.items():
if name2 != 'full':
list_of_kfs.adding(stats_kf)
combined_stats_kf = functools.reduce(lambda a, b: a.join(b, how='left'), list_of_kfs)
self.ckf = kf.join(combined_stats_kf, how='inner')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def check_text_label_contents(self):
kf2 = self.ckf.clone(deep=True)
d = self._open_predicted_labels_dict()
for name1, content in d.items():
for name2, text_label_col in content.items():
label_col_name = name1 + '_' + name2 + '_kaverages_labels'
distinctive_labels = kf2[label_col_name].distinctive().convert_list()
distinctive_labels = [x for x in distinctive_labels if math.ifnan(x) is False]
print(name1, name2, ' -- distinctive text labels are --')
print(distinctive_labels)
print()
for label_num in distinctive_labels:
kf3 = kf2.loc[kf2[label_col_name]==label_num, [self.tcn + 'ModeClean']]
if length(kf3.index) >= 10:
kf3 = kf3.sample_by_num(n=10)
f_name = self.initial_panel + '_' + name1 + '_' + name2 + '_' + 'TL_' + str(label_num) + '_' + self.tcn + '_sample_by_num.csv'
q = self.common_path / 'check_predicted_label_text_cols' / f_name
kf3.to_csv(q)
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _text_cluster_group_count(self):
kf2 = self.ckf.clone(deep=True)
d = dict.fromkeys(self.ssnames.keys())
self.broad_niche_cutoff = dict.fromkeys(self.ssnames.keys())
self.nicheDummy_labels = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
d[name1] = dict.fromkeys(content1)
self.broad_niche_cutoff[name1] = dict.fromkeys(content1)
self.nicheDummy_labels[name1] = dict.fromkeys(content1)
for name2 in d[name1].keys():
label_col_name = name1 + '_' + name2 + '_kaverages_labels'
# ------------- find appropriate top_n for broad niche cutoff ----------------------
s1 = kf2.grouper([label_col_name]).size().sort_the_values(ascending=False).to_numpy()
s_multiples = np.array([])
for i in range(length(s1)-1):
multiple = s1[i]/s1[i+1]
s_multiples = np.adding(s_multiples, multiple)
# top_n counts the leading size ratios that are at least 2 (with fallbacks handled below)
top_n = 0
for i in range(length(s_multiples)-2):
if s_multiples[i] >= 2 and top_n == i:
top_n += 1
elif s_multiples[i+1] >= 1.5 and top_n == 0:
top_n += 2
elif s_multiples[i+2] >= 1.5 and top_n == 0:
top_n += 3
elif s_multiples[0] <= 1.1 and top_n == 0:
top_n += 2
else:
if top_n == 0:
top_n = 1
self.broad_niche_cutoff[name1][name2] = top_n
s2 = kf2.grouper([label_col_name]).size().sort_the_values(ascending=False)
s3 = s2.iloc[:self.broad_niche_cutoff[name1][name2], ]
self.nicheDummy_labels[name1][name2] = s3.index.convert_list()
# ------------- convert to frame ---------------------------------------------------
d[name1][name2] = kf2.grouper([label_col_name]).size(
).sort_the_values(ascending=False).renagetting_ming(name1 + '_' + name2 + '_Apps_Count').to_frame()
return d
def _getting_xy_var_list(self, name1, name2, y_var, the_panel=None):
"""
:param name1: 'Leaders' or 'Non-leaders'
:param name2: 'full' or one of the category sub-sample names
:param y_var: 'Imputedprice', 'Imputedgetting_minInsttotal_alls', 'offersIAPTrue', 'containsAdsTrue'
:param the_panel: a single panel string; if None, variables for all panels are returned
:return:
"""
time_invar_controls = ['size', 'DaysSinceReleased']
x_var = [name1 + '_' + name2 + '_NicheDummy']
if the_panel is None:
time_var_controls = ['Imputedscore_' + i for i in self.total_all_panels] + \
['Imputedreviews_' + i for i in self.total_all_panels]
y_var = [y_var + '_' + i for i in self.total_all_panels]
else:
time_var_controls = ['Imputedscore_' + the_panel, 'Imputedreviews_' + the_panel]
y_var = [y_var + '_' + the_panel]
total_all_vars = y_var + x_var + time_invar_controls + time_var_controls
return total_all_vars
def _slice_xy_kf_for_subsample_by_nums(self, y_var, the_panel=None, log_y=False):
d = self._slice_subsample_by_nums_dict()
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in d.items():
res[name1] = dict.fromkeys(content1.keys())
for name2, kf in content1.items():
var_list = self._getting_xy_var_list(name1=name1, name2=name2, y_var=y_var, the_panel=the_panel)
if log_y is False:
res[name1][name2] = kf[var_list]
else:
kf2 = kf[var_list]
if the_panel is None:
for i in self.total_all_panels:
kf2['Log' + y_var + '_' + i] = np.log2(kf2[y_var + '_' + i] + 1)
kf2.sip([y_var + '_' + i], axis=1, inplace=True)
else:
kf2['Log' + y_var + '_' + the_panel] = np.log2(kf2[y_var + '_' + the_panel] + 1)
kf2.sip([y_var + '_' + the_panel], axis=1, inplace=True)
res[name1][name2] = kf2
return res
def _slice_subsample_by_nums_dict(self):
"""
:return: a nested dict, keyed by name1 and name2, of sub-sample knowledgeframes sliced from self.ckf
"""
kf = self.ckf.clone(deep=True)
d = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
d[name1] = dict.fromkeys(content1)
kf2 = kf.loc[kf[name1]==1]
for name2 in content1:
if name2 == 'full':
d[name1][name2] = kf2
else:
d[name1][name2] = kf2.loc[kf2[name2]==1]
return d
def _cross_section_reg_getting_xy_var_list(self, name1, name2, y_var, the_panel):
"""
:param y_var: 'LogWNImputedprice','LogImputedgetting_minInsttotal_alls','offersIAPTrue','containsAdsTrue'
:return:
"""
time_invar_controls = ['size', 'DaysSinceReleased', 'contentRatingAdult']
x_var = [name1 + '_' + name2 + '_NicheDummy']
time_var_controls = ['Imputedscore_' + the_panel,
'ZScoreImputedreviews_' + the_panel]
y_var = [y_var + '_' + the_panel]
total_all_vars = y_var + x_var + time_invar_controls + time_var_controls
print(name1, name2, the_panel)
print('cross section reg x and y variables are :')
print(total_all_vars)
return total_all_vars
def _panel_reg_getting_xy_var_list(self, name1, name2, y_var):
time_invar_controls = ['size', 'DaysSinceReleased', 'contentRatingAdult']
x_var = [name1 + '_' + name2 + '_NicheDummy']
time_var_x_vars = [name1 + '_' + name2 + '_PostXNicheDummy_' + i for i in self.total_all_panels] + \
['PostDummy_' + i for i in self.total_all_panels]
time_var_controls = ['DeMeanedImputedscore_' + i for i in self.total_all_panels] + \
['DeMeanedZScoreImputedreviews_' + i for i in self.total_all_panels]
y_var = [y_var + '_' + i for i in self.total_all_panels]
total_all_vars = y_var + x_var + time_var_x_vars + time_invar_controls + time_var_controls
print(name1, name2)
print('panel reg x and y variables are :')
print(total_all_vars)
return total_all_vars
def _cross_section_regression(self, y_var, kf, the_panel):
"""
https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.RegressionResults.html#statsmodels.regression.linear_model.RegressionResults
#https://www.statsmodels.org/stable/rlm.html
https://stackoverflow.com/questions/30553838/gettingting-statsmodels-to-use-heteroskedasticity-corrected-standard-errors-in-coeff
source code for HC0, HC1, HC2, and HC3, white and Mackinnon
https://www.statsmodels.org/dev/_modules/statsmodels/regression/linear_model.html
https://timecollectionsreasoning.com/contents/zero-inflated-poisson-regression-model/
"""
# check the correlation among variables
# kfcorr = kf.corr(method='pearson').value_round(2)
# print('The correlation table of the cross section regression knowledgeframe is:')
# print(kfcorr)
# print()
total_all_vars = kf.columns.values.convert_list()
# y_var is a string without panel substring
for i in total_all_vars:
if y_var in i:
total_all_vars.remove(i)
independents_kf = kf[total_all_vars]
X = sm.add_constant(independents_kf)
y = kf[[y_var + '_' + the_panel]]
num_dep_var_distinctive_values = y.ndistinctive().squeeze()
print(y_var, 'contains', str(num_dep_var_distinctive_values), 'unique values.')
# I found that for the leaders' medical category group y contains only zeros, so OLS does not apply;
# generally, price is predominantly zero, so use a zero-inflated regression instead
if y_var == 'LogImputedprice':
print(y_var, ' -- price is predominantly zero, using zero-inflated Poisson instead of OLS -- ')
model = sm.ZeroInflatedPoisson(endog=y, exog=X, exog_infl=X, inflation='logit')
results = model.fit()
else:
model = sm.OLS(y, X)
results = model.fit(cov_type='HC3')
return results
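# Minimal hedged sketch (synthetic data, not part of the original class) of the main
# estimator used above: OLS with HC3 heteroskedasticity-robust standard errors.
def _demo_hc3_ols():
    rng = np.random.default_rng(0)
    x = rng.normal(size=(200, 1))
    y = 1.0 + 2.0 * x[:, 0] + rng.normal(size=200)
    X = sm.add_constant(x)
    return sm.OLS(y, X).fit(cov_type='HC3')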
def _panel_reg_pooled_ols(self,
y_var, kf):
"""
Internal function
returns the pooled OLS panel regression result (or None when the dependent variable has no variation).
I will not run a fixed effects model here because it would drop the time-invariant variables.
In addition, I just wanted to check whether, for the time-variant variables, the demeaned time-variant variables
will have the same coefficient in POOLED OLS as the time variant variables in FE.
"""
total_all_vars = kf.columns.values.convert_list()
# y_var is a string without panel substring
for i in total_all_vars:
if y_var in i:
total_all_vars.remove(i)
independents_kf = kf[total_all_vars]
X = sm.add_constant(independents_kf)
y = kf[[y_var]]
# check if there is any variability in the Y variable
# for example, leaders category Medical LogImputedprice has zeros in all its columns
num_dep_var_distinctive_values = y.ndistinctive().squeeze()
if num_dep_var_distinctive_values == 1:
print(y_var, ' -- The dependent variable has no variation in it, skip this PANEL regression -- ')
return None
else:
# https://bashtage.github.io/linearmodels/panel/panel/linearmodels.panel.model.PanelOLS.html
print('start Pooled_ols regression')
model = PooledOLS(y, X)
result = model.fit(cov_type='clustered', cluster_entity=True)
return result
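# Hedged sketch (not part of the original class): the PooledOLS call above expects a
# long-format knowledgeframe with a two-level (entity, time) index; lkf, y_col and
# x_cols below are hypothetical placeholders for such an input.
def _demo_pooled_ols(lkf, y_col, x_cols):
    y = lkf[[y_col]]
    X = sm.add_constant(lkf[x_cols])
    return PooledOLS(y, X).fit(cov_type='clustered', cluster_entity=True)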
def _reg_for_total_all_subsample_by_nums_for_single_y_var(self, reg_type, y_var):
data = self._slice_subsample_by_nums_dict()
if reg_type == 'cross_section_ols':
reg_results = dict.fromkeys(self.total_all_panels)
for i in self.total_all_panels:
reg_results[i] = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
reg_results[i][name1] = dict.fromkeys(content1)
for name2 in content1:
total_allvars = self._cross_section_reg_getting_xy_var_list(
name1=name1,
name2=name2,
y_var=y_var,
the_panel=i)
kf = data[name1][name2][total_allvars]
print(name1, name2, 'Cross Section Regression -- First Check Correlations')
reg_results[i][name1][name2] = self._cross_section_regression(
y_var=y_var,
kf=kf,
the_panel=i)
for i in self.total_all_panels:
self._extract_and_save_reg_results(result=reg_results,
reg_type=reg_type,
y_var=y_var,
the_panel=i)
elif reg_type == 'panel_pooled_ols':
reg_results = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
reg_results[name1] = dict.fromkeys(content1)
for name2 in content1:
total_allvars = self._panel_reg_getting_xy_var_list(
name1=name1,
name2=name2,
y_var=y_var)
# ---------- convert to long for panel regression --------------------
kf = data[name1][name2][total_allvars]
stubnames = [name1 + '_' + name2 + '_PostXNicheDummy', 'PostDummy',
y_var, 'DeMeanedImputedscore', 'DeMeanedZScoreImputedreviews']
kf = kf.reseting_index()
lkf = mk.wide_to_long(
kf,
stubnames=stubnames,
i=['index'],
j="panel",
sep='_').reseting_index()
lkf["panel"] = mk.convert_datetime(lkf["panel"], formating='%Y%m')
lkf = lkf.sort_the_values(by=["index", "panel"]).set_index('index')
lkf = lkf.reseting_index().set_index(['index', 'panel'])
reg_results[name1][name2] = self._panel_reg_pooled_ols(y_var=y_var, kf=lkf)
self._extract_and_save_reg_results(result=reg_results,
reg_type=reg_type,
y_var=y_var)
else:
reg_results = {}
return reg_results
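# Hedged mini-example (synthetic two-period data, not part of the original class) of the
# wide-to-long reshape used above before the panel regression.
def _demo_wide_to_long():
    kf = mk.KnowledgeFrame({'index': [0, 1],
                            'LogWNImputedprice_201907': [1.0, 2.0],
                            'LogWNImputedprice_201908': [1.5, 2.5]})
    lkf = mk.wide_to_long(kf, stubnames=['LogWNImputedprice'],
                          i=['index'], j='panel', sep='_')
    # one row per (index, panel) pair with a single LogWNImputedprice column
    return lkf.reseting_index()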
def reg_for_total_all_subsample_by_nums_for_total_all_y_vars(self, reg_type):
res = dict.fromkeys(self.total_all_y_reg_vars)
for y in self.total_all_y_reg_vars:
res[y] = self._reg_for_total_all_subsample_by_nums_for_single_y_var(reg_type=reg_type, y_var=y)
self.reg_results = res
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _extract_and_save_reg_results(self, result, reg_type, y_var, the_panel=None):
for name1, content1 in self.ssnames.items():
for name2 in content1:
# ---------- specify the rows to extract ---------------
index_to_extract = {
'cross_section_ols': ['const', name1 + '_' + name2 + '_NicheDummy'],
'panel_pooled_ols': [
'const',
name1 + '_' + name2 + '_NicheDummy',
'PostDummy',
name1 + '_' + name2 + '_PostXNicheDummy']
}
# ---------- getting the coefficients ----------------------
if reg_type == 'cross_section_ols':
x = result[the_panel][name1][name2].params
else:
x = result[name1][name2].params
x = x.to_frame()
x.columns = ['parameter']
y = x.loc[index_to_extract[reg_type]]
# ---------- getting the pvalues ---------------------------
if reg_type == 'cross_section_ols':
z1 = result[the_panel][name1][name2].pvalues
else:
z1 = result[name1][name2].pvalues
z1 = z1.to_frame()
z1.columns = ['pvalue']
z2 = z1.loc[index_to_extract[reg_type]]
y2 = y.join(z2, how='inner')
y2 = y2.value_round(3)
if the_panel is None:
filengthame = y_var + '_' + name1 + '_' + name2 + '_' + reg_type + '.csv'
else:
filengthame = y_var + '_' + name1 + '_' + name2 + '_' + reg_type + '_' + the_panel + '.csv'
y2.to_csv(self.des_stats_root / self.name1_path_keywords[name1] / 'reg_results_tables' / filengthame)
print(name1, name2, 'Reg results are saved in the reg_results_tables folder')
def _create_cross_section_reg_results_kf_for_partotal_allel_trend_beta_graph(self, alpha):
"""
uses the cross-section OLS results stored in self.reg_results (see self._cross_section_regression());
alpha = 0.05 gives 95% confidence intervals for the coefficients
"""
# total_all dependant variables in one dictionary
res_results = dict.fromkeys(self.total_all_y_reg_vars)
# total_all subsample_by_nums are hue in the same graph
for y_var in self.total_all_y_reg_vars:
res_results[y_var] = self.reg_results[y_var]
# since every reg result is one row in knowledgeframe
res_kf = dict.fromkeys(self.total_all_y_reg_vars)
for y_var, panels in res_results.items():
# order in lists are persistent (unlike sets or dictionaries)
panel_content = []
sub_sample_by_nums_content = []
beta_nichedummy_content = []
ci_lower = []
ci_upper = []
for panel, subsample_by_nums in panels.items():
for name1, content1 in subsample_by_nums.items():
for name2, reg_result in content1.items():
panel_content.adding(panel)
sub_sample_by_nums_content.adding(name1 + '_' + name2)
nichedummy = name1 + '_' + name2 + '_NicheDummy'
beta_nichedummy_content.adding(reg_result.params[nichedummy])
ci_lower.adding(reg_result.conf_int(alpha=alpha).loc[nichedummy, 0])
ci_upper.adding(reg_result.conf_int(alpha=alpha).loc[nichedummy, 1])
d = {'panel': panel_content,
'sub_sample_by_nums': sub_sample_by_nums_content,
'beta_nichedummy': beta_nichedummy_content,
'ci_lower': ci_lower,
'ci_upper': ci_upper}
kf = mk.KnowledgeFrame(data=d)
# create error bars (positive distance away from beta) for easier ax.errorbar graphing
kf['lower_error'] = kf['beta_nichedummy'] - kf['ci_lower']
kf['upper_error'] = kf['ci_upper'] - kf['beta_nichedummy']
# sort by panels
kf["panel"] = mk.convert_datetime(kf["panel"], formating='%Y%m')
kf["panel"] = kf["panel"].dt.strftime('%Y-%m')
kf = kf.sort_the_values(by=["panel"])
res_kf[y_var] = kf
return res_kf
def _put_reg_results_into_monkey_for_single_y_var(self, reg_type, y_var, the_panel=None):
"""
:param reg_type: 'cross_section_ols' or 'panel_pooled_ols'
:param y_var: any one of ['LogWNImputedprice', 'LogImputedgetting_minInsttotal_alls', 'offersIAPTrue', 'containsAdsTrue']
The extracted estimates are read from self.reg_results; see the documentation of the
PanelResult class (which each panel regression result object is an instance of).
:return:
"""
# ============= 1. extract results info and put them into dicts ==================
params_pvalues_dict = dict.fromkeys(self.ssnames.keys())
for name1, content1 in self.ssnames.items():
params_pvalues_dict[name1] = dict.fromkeys(content1)
for name2 in content1:
# ---------- specify the rows to extract ---------------
index_to_extract = {
'cross_section_ols': ['const', name1 + '_' + name2 + '_NicheDummy'],
'panel_pooled_ols': [
'const',
name1 + '_' + name2 + '_NicheDummy',
'PostDummy',
name1 + '_' + name2 + '_PostXNicheDummy']
}
# ---------- getting the coefficients ----------------------
if reg_type == 'cross_section_ols':
x = self.reg_results[y_var][the_panel][name1][name2].params
else:
x = self.reg_results[y_var][name1][name2].params
x = x.to_frame()
x.columns = ['parameter']
y = x.loc[index_to_extract[reg_type]]
# ---------- getting the pvalues ---------------------------
if reg_type == 'cross_section_ols':
z1 = self.reg_results[y_var][the_panel][name1][name2].pvalues
else:
z1 = self.reg_results[y_var][name1][name2].pvalues
z1 = z1.to_frame()
z1.columns = ['pvalue']
z2 = z1.loc[index_to_extract[reg_type]]
def _total_allocate_asterisk(v):
if 0.05 < v <= 0.1:
return '*'
elif 0.01 < v <= 0.05:
return '**'
elif v <= 0.01:
return '***'
else:
return ''
z2['asterisk'] = z2['pvalue'].employ(lambda x: _total_allocate_asterisk(x))
y2 = y.join(z2, how='inner')
y2['parameter'] = y2['parameter'].value_round(3).totype(str)
y2['parameter'] = y2['parameter'] + y2['asterisk']
y2.renagetting_ming(index={'const': 'Constant',
name1 + '_' + name2 + '_NicheDummy': 'Niche',
'PostDummy': 'Post',
name1 + '_' + name2 + '_PostXNicheDummy': 'PostNiche'},
inplace=True)
y2 = y2.reseting_index()
y2.sip(columns=['pvalue', 'asterisk'], inplace=True)
y2.insert(0, 'Samples', [name1 + '_' + name2] * length(y2.index))
y2['Samples'] = y2['Samples'].employ(lambda x: self.name12_reg_table_names[x] if x in self.name12_reg_table_names.keys() else 'None')
y2.renagetting_ming(columns={'index': 'Independent Vars',
'parameter': self.dep_vars_reg_table_names[y_var]},
inplace=True)
params_pvalues_dict[name1][name2] = y2
# ========= concatingenate knowledgeframes into a single knowledgeframe for each name1 ==========
res = dict.fromkeys(params_pvalues_dict.keys())
for name1, content1 in params_pvalues_dict.items():
kf_list = []
for name12, kf in content1.items():
kf_list.adding(kf)
akf = functools.reduce(lambda a, b: a.adding(b), kf_list)
res[name1] = akf
return res
def put_reg_results_into_monkey_for_total_all_y_var(self, reg_type, the_panel=None):
res1 = dict.fromkeys(self.total_all_y_reg_vars)
if reg_type == 'cross_section_ols':
for y in self.total_all_y_reg_vars:
res1[y] = self._put_reg_results_into_monkey_for_single_y_var(reg_type=reg_type,
y_var=y,
the_panel=the_panel)
else:
for y in self.total_all_y_reg_vars:
res1[y] = self._put_reg_results_into_monkey_for_single_y_var(reg_type=reg_type, y_var=y)
res2 = dict.fromkeys(self.ssnames.keys())
for name1 in res2.keys():
kf_list = []
for y in self.total_all_y_reg_vars:
kf_list.adding(res1[y][name1])
akf = functools.reduce(lambda a, b: a.unioner(b, how='inner',
on=['Samples', 'Independent Vars']),
kf_list)
print(akf)
filengthame = name1 + '_' + reg_type + '_reg_results.csv'
akf.to_csv(self.des_stats_root / self.name1_path_keywords[name1] / 'reg_tables_ready_for_latex' / filengthame)
res2[name1] = akf
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def graph_numApps_per_text_cluster(self):
"""
This graph has the order rank of text clusters on the x-axis (for example, with 250 text clusters we order them from 0 to 249, where
the 0th text cluster contains the largest number of apps and the count decreases as the rank increases)
and the number of apps inside each cluster on the y-axis.
In the second meeting with Leah we decided to abandon this graph because there are too many clusters and they
sit right next to each other toward the right of the graph.
"""
d = self._numApps_per_cluster()
for name1, content1 in d.items():
for name2, content2 in content1.items():
kf3 = content2.reseting_index()
kf3.columns = ['cluster_labels', 'Apps Count']
# -------------- plot ----------------------------------------------------------------
fig, ax = plt.subplots()
# color the top_n bars
# after sorting in descending order, the first n ranked clusters (n = broad_niche_cutoff) are broad
color = ['red'] * self.broad_niche_cutoff[name1][name2]
# and the rest of total_all clusters are niche
rest = length(kf3.index) - self.broad_niche_cutoff[name1][name2]
color.extend(['blue'] * rest)
kf3.plot.bar( x='cluster_labels',
xlabel='Text Clusters',
y='Apps Count',
ylabel='Apps Count',
ax=ax,
color=color)
# customize legend
BRA = mpatches.Patch(color='red', label='broad apps')
NIA = mpatches.Patch(color='blue', label='niche apps')
ax.legend(handles=[BRA, NIA], loc='upper right')
ax.axes.xaxis.set_ticks([])
ax.yaxis.set_ticks_position('right')
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.grid(True)
# label the top n clusters
kf4 = kf3.iloc[:self.broad_niche_cutoff[name1][name2], ]
for index, row in kf4.traversal():
value = value_round(row['Apps Count'])
ax.annotate(value,
(index, value),
xytext=(0, 0.1), # 2 points to the right and 15 points to the top of the point I annotate
textcoords='offset points')
plt.xlabel("Text Clusters")
plt.ylabel('Apps Count')
# ------------ set title and save ----------------------------------------
self._set_title_and_save_graphs(fig=fig,
file_keywords='numApps_count',
name1=name1,
name2=name2,
# graph_title='Histogram of Apps Count In Each Text Cluster',
relevant_folder_name = 'numApps_per_text_cluster')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def graph_numClusters_per_cluster_size_bin(self, combine_clusters):
res = self._numClusters_per_cluster_size_bin(combine_clusters)
for name1, content1 in res.items():
for name2, kfres in content1.items():
kfres.reseting_index(inplace=True)
kfres.columns = ['cluster_size_bin', 'Clusters Count']
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
kfres.plot.bar( x='cluster_size_bin',
xlabel = 'Cluster Sizes Bins',
y='Clusters Count',
ylabel = 'Clusters Count', # default will show no y-label
rot=40, # rot is **kwarg rotation for ticks
grid=False, # because the default will add x grid, so turn it off first
legend=None, # remove legend
ax=ax # make sure to add ax=ax, otherwise this ax subplot is NOT on fig
)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.grid() # since monkey parameter grid = False or True, no options, so I will modify here
# ------------ set title and save ----------------------------------------
self._set_title_and_save_graphs(fig=fig,
file_keywords='numClusters_count',
name1=name1,
name2=name2,
# graph_title='Histogram of Clusters In Each Cluster Size Bin',
relevant_folder_name='numClusters_per_cluster_size_bin')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def graph_numApps_per_cluster_size_bin(self, combine_clusters):
res = self._numApps_per_cluster_size_bin(combine_clusters)
for name1, content1 in res.items():
for name2, kfres in content1.items():
kfres.reseting_index(inplace=True)
kfres.columns = ['cluster_size_bin', 'numApps_in_cluster_size_bin']
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
kfres.plot.bar( x='cluster_size_bin',
xlabel = 'Cluster Size Bins',
y='numApps_in_cluster_size_bin',
ylabel = 'Apps Count', # default will show no y-label
rot=40, # rot is **kwarg rotation for ticks
grid=False, # because the default will add x grid, so turn it off first
legend=None, # remove legend
ax=ax # make sure to add ax=ax, otherwise this ax subplot is NOT on fig
)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.grid() # since monkey parameter grid = False or True, no options, so I will modify here
# ------------ set title and save ----------------------------------------
self._set_title_and_save_graphs(fig=fig,
file_keywords='numApps_per_cluster_size_bin',
name1=name1,
name2=name2,
# graph_title='Histogram of Apps Count In Each Cluster Size Bin',
relevant_folder_name='numApps_per_cluster_size_bin')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _grouper_subsample_by_num_kfs_by_nichedummy(self):
d = self._slice_subsample_by_nums_dict()
res = dict.fromkeys(self.ssnames.keys())
for name1, content1 in d.items():
res[name1] = dict.fromkeys(content1.keys())
for name2, kf in content1.items():
niche_dummy = name1 + '_' + name2 + '_NicheDummy'
kf2 = kf.grouper([niche_dummy]).size().to_frame()
kf2.renagetting_ming(columns={0: name1 + '_' + name2}, index={0: 'Broad Apps', 1: 'Niche Apps'}, inplace=True)
res[name1][name2] = kf2
return res
def _combine_name2s_into_single_kf(self, name12_list, d):
"""
:param name12_list: a list of name1 + '_' + name2 keys, e.g. the entries of self.graph_name1_ssnames
:param d: the dictionary of single subsample_by_num kf containing stats
:return:
"""
kf_list = []
for name1, content1 in d.items():
for name2, kf in content1.items():
name12 = name1 + '_' + name2
if name12 in name12_list:
kf_list.adding(kf)
kf2 = functools.reduce(lambda a, b: a.join(b, how='inner'), kf_list)
l = kf2.columns.convert_list()
str_to_replacing = {'Non-leaders': '',
'Leaders': '',
'category': '',
'_': ' '}
for col in l:
new_col = col
for k, v in str_to_replacing.items():
new_col = new_col.replacing(k, v)
new_col = new_col.title()
kf2.renagetting_ming(columns={col: new_col}, inplace=True)
kf2.loc["Total"] = kf2.total_sum(axis=0)
kf2 = kf2.sort_the_values(by='Total', axis=1, ascending=False)
kf2 = kf2.sip(labels='Total')
kf2 = kf2.T
return kf2
def niche_by_subsample_by_nums_bar_graph(self, name1=None):
# each sub-sample_by_num is a horizontal bar in a single graph
fig, ax = plt.subplots(figsize=(8, 5))
fig.subplots_adjust(left=0.2)
# -------------------------------------------------------------------------
res = self._grouper_subsample_by_num_kfs_by_nichedummy()
kf = self._combine_name2s_into_single_kf(name12_list=self.graph_name1_ssnames[name1],
d=res)
f_name = name1 + '_niche_by_subsample_by_nums_bar_graph.csv'
if name1 == 'Leaders':
q = self.des_stats_leaders_tables / f_name
else:
q = self.des_stats_non_leaders_tables / f_name
kf.to_csv(q)
# -------------------------------------------------------------------------
kf.plot.barh(stacked=True,
color={"Broad Apps": "orangered",
"Niche Apps": "lightsalmon"},
ax=ax)
ax.set_ylabel('Sub-sample_by_nums')
ax.set_yticklabels(ax.getting_yticklabels())
ax.set_xlabel('Apps Count')
ax.xaxis.grid()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# graph_title = self.initial_panel + ' ' + self.graph_name1_titles[name1] + \
# '\n Apps Count by Niche and Broad Types'
# ax.set_title(graph_title)
ax.legend()
# ------------------ save file -----------------------------------------------------------------
self._set_title_and_save_graphs(fig=fig,
name1=name1,
file_keywords=self.graph_name1_titles[name1].lower().replacing(' ', '_'),
relevant_folder_name='nichedummy_count_by_subgroup')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def _prepare_pricing_vars_for_graph_group_by_var(self,
group_by_var,
the_panel=None):
"""
group_by_var can be either "NicheDummy" or "cluster_size_bin".
The knowledgeframe (self.ckf) is the output of combine_app_level_text_cluster_stats_with_kf.
"""
key_vars = ['Imputedprice',
'LogImputedprice',
# use this for regression and descriptive stats because it added uniform white noise to avoid 0 price
'LogWNImputedprice',
'Imputedgetting_minInsttotal_alls',
'LogImputedgetting_minInsttotal_alls',
'offersIAPTrue',
'containsAdsTrue']
if the_panel is not None:
selected_vars = [i + '_' + the_panel for i in key_vars]
else:
selected_vars = [i + '_' + j for j in self.total_all_panels for i in key_vars]
d = self._slice_subsample_by_nums_dict()
res12 = dict.fromkeys(self.ssnames.keys())
res34 = dict.fromkeys(self.ssnames.keys())
for name1, content1 in d.items():
res12[name1] = dict.fromkeys(content1.keys())
res34[name1] = dict.fromkeys(content1.keys())
for name2, kf in content1.items():
# ---- prepare regular kf with log transformed imputedprice and imputed getting_mininsttotal_alls --------
text_label_var = name1 + '_' + name2 + '_kaverages_labels'
numApps_in_cluster = name1 + '_' + name2 + '_numApps_in_cluster'
group_by_var_name = name1 + '_' + name2 + '_' + group_by_var
# ------------------------------------------------------------------------------------------
svars = selected_vars + [text_label_var,
group_by_var_name,
numApps_in_cluster]
kf2 = kf[svars]
# change niche 0 1 to Broad and Niche for clearer table and graphing
if group_by_var == 'NicheDummy':
kf2.loc[kf2[group_by_var_name] == 1, group_by_var_name] = 'Niche'
kf2.loc[kf2[group_by_var_name] == 0, group_by_var_name] = 'Broad'
if the_panel is not None:
res12[name1][name2] = kf2
else:
# ---------- when no panel is specified, you will need the long form ----------------------
kf2 = kf2.reseting_index()
lkf = mk.wide_to_long(
kf2,
stubnames=key_vars,
i=['index'],
j="panel",
sep='_').reseting_index()
lkf["panel"] = mk.convert_datetime(lkf["panel"], formating='%Y%m')
lkf["panel"] = lkf["panel"].dt.strftime('%Y-%m')
lkf = lkf.sort_the_values(by=["index", "panel"]).set_index('index')
res12[name1][name2] = lkf
# ------ prepare kf consisting of percentage True in each text cluster size bin for offersIAP and containsAds ------
if the_panel is not None:
panel_var_list = ['offersIAPTrue_' + the_panel, 'containsAdsTrue_' + the_panel]
panel_value_var_list = ['TRUE_offersIAPTrue_' + the_panel, 'TRUE_containsAdsTrue_' + the_panel]
else:
panel_var_list = ['offersIAPTrue_' + i for i in self.total_all_panels] + \
['containsAdsTrue_' + i for i in self.total_all_panels]
panel_value_var_list = ['TRUE_offersIAPTrue_' + i for i in self.total_all_panels] + \
['TRUE_containsAdsTrue_' + i for i in self.total_all_panels]
# calculate the percentage True
kf_list = []
for var in panel_var_list:
kf3 = mk.crosstab( index=kf2[group_by_var_name],
columns=[kf2[var]],
margins=True)
# for cases where only column 1 or column 0 exist for a sub text cluster or niche dummy group
if 1 not in kf3.columns:
print(name1, name2, the_panel, var, 'column 1 does not exist.')
kf3[1] = 0
print('created column 1 with zeros. ')
if 0 not in kf3.columns:
print(name1, name2, the_panel, var, 'column 0 does not exist.')
kf3[0] = 0
print('created column 0 with zeros. ')
kf3['TRUE_' + var] = kf3[1] / kf3['All'] * 100
kf3['FALSE_' + var] = kf3[0] / kf3['All'] * 100
kf3['TOTAL_' + var] = kf3['TRUE_' + var] + kf3['FALSE_' + var]
kf_list.adding(kf3[['TRUE_' + var]])
kf4 = functools.reduce(lambda a, b: a.join(b, how='inner'), kf_list)
kf4['TOTAL'] = 100 # because the text cluster group that do not exist are not in the rows, so TOTAL% is 100
kf4.sip(index='All', inplace=True)
total = kf2.grouper(group_by_var_name)[var].count().to_frame()
total.renagetting_ming(columns={var: 'Total_Count'}, inplace=True)
kf5 = total.join(kf4, how='left').fillnone(0)
kf5.sip(columns='Total_Count', inplace=True)
kf5.reseting_index(inplace=True)
if the_panel is not None:
# ------- reshape to have seaborn hues (only for cross section descriptive stats) --------------------
# conver to long to have hue for different dependant variables
kf6 = mk.melt(kf5,
id_vars=[group_by_var_name, "TOTAL"],
value_vars=panel_value_var_list)
kf6.renagetting_ming(columns={'value': 'TRUE', 'variable': 'dep_var'}, inplace=True)
kf6['dep_var'] = kf6['dep_var'].str.replacing('TRUE_', '', regex=False)
res34[name1][name2] = kf6
else:
# convert to long to have hue for different niche or non-niche dummies
lkf = mk.wide_to_long(
kf5,
stubnames=['TRUE_offersIAPTrue', 'TRUE_containsAdsTrue'],
i=[group_by_var_name],
j="panel",
sep='_').reseting_index()
lkf["panel"] = mk.convert_datetime(lkf["panel"], formating='%Y%m')
lkf["panel"] = lkf["panel"].dt.strftime('%Y-%m')
lkf = lkf.sort_the_values(by=["panel"])
res34[name1][name2] = lkf
return res12, res34
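# Hedged mini-example (synthetic rows, not part of the original class) of the crosstab
# step above: computing the percentage of True values per group with a margins column.
def _demo_percent_true_crosstab():
    kf = mk.KnowledgeFrame({'group': ['Niche', 'Niche', 'Broad', 'Broad'],
                            'offersIAPTrue': [1, 0, 1, 1]})
    ct = mk.crosstab(index=kf['group'], columns=kf['offersIAPTrue'], margins=True)
    ct['TRUE_offersIAPTrue'] = ct[1] / ct['All'] * 100
    return ct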
def graph_histogram_pricing_vars_by_niche(self, name1, the_panel):
res12, res34 = self._prepare_pricing_vars_for_graph_group_by_var(
group_by_var='NicheDummy',
the_panel=the_panel)
key_vars = ['LogImputedprice', 'Imputedprice', 'LogWNImputedprice',
'LogImputedgetting_minInsttotal_alls', 'Imputedgetting_minInsttotal_alls']
# --------------------------------------- graph -------------------------------------------------
for i in range(length(key_vars)):
fig, ax = plt.subplots(nrows=2,
ncols=3,
figsize=(15, 10),
sharey='row',
sharex='col')
fig.subplots_adjust(bottom=0.2)
name2_l = self.ssnames[name1] # for kf names name2 only
name12_l = self.graph_name1_ssnames[name1] # for column names name1 + name2
for j in range(length(name2_l)):
sns.set(style="whitegrid")
sns.despine(right=True, top=True)
sns.histplot(data=res12[name1][name2_l[j]],
x=key_vars[i] + "_" + the_panel,
hue=name12_l[j] + '_NicheDummy',
ax=ax.flat[j])
sns.despine(right=True, top=True)
graph_title = self.name12_graph_title_dict[name12_l[j]]
ax.flat[j].set_title(graph_title)
ax.flat[j].set_ylabel(self.graph_dep_vars_ylabels[key_vars[i]])
ax.flat[j].xaxis.set_visible(True)
ax.flat[j].legend().set_visible(False)
fig.legend(labels=['Niche App : Yes', 'Niche App : No'],
loc='lower right', ncol=2)
# ------------ set title and save ---------------------------------------------
self._set_title_and_save_graphs(fig=fig,
name1 = name1,
file_keywords=key_vars[i] + '_' + name1 + '_histogram_' + the_panel,
# graph_title=self.graph_name1_titles[name1] + \
# ' Cross Section Histogram of \n' + \
# self.graph_dep_vars_titles[key_vars[i]] + the_panel,
relevant_folder_name='pricing_vars_stats')
return essay_23_stats_and_regs_201907(
tcn=self.tcn,
combined_kf=self.ckf,
broad_niche_cutoff=self.broad_niche_cutoff,
broadDummy_labels=self.broadDummy_labels,
reg_results=self.reg_results)
def table_descriptive_stats_pricing_vars(self, the_panel):
"""
        The data version of graph_descriptive_stats_pricing_vars, combining every group
        combination into a single table for each panel.
"""
for grouper_var in ['cluster_size_bin', 'NicheDummy']:
res12, res34 = self._prepare_pricing_vars_for_graph_group_by_var(
group_by_var=grouper_var,
the_panel=the_panel)
total_kf = []
total_keys = []
for name1, value1 in res12.items():
lkf = []
keys_lkf = []
for name2, value2 in value1.items():
grouper_var2 = name1 + '_' + name2 + '_' + grouper_var
kf = value2.clone()
                # --------- cluster size depends on whether you used option combine_tex_tcluster --------------------
kf2 = kf[['LogWNImputedprice_'+ the_panel,
'LogImputedgetting_minInsttotal_alls_'+ the_panel,
'offersIAPTrue_'+ the_panel,
'containsAdsTrue_'+ the_panel,
grouper_var2]].grouper(grouper_var2).describe()
lkf.adding(kf2)
keys_lkf.adding(name2)
kf4 = | mk.concating(lkf, keys=keys_lkf) | pandas.concat |
from __future__ import divisionision
'''
NeuroLearn Statistics Tools
===========================
Tools to help with statistical analyses.
'''
__total_all__ = ['pearson',
'zscore',
'fdr',
'holm_bonf',
'threshold',
'multi_threshold',
'winsorize',
'trim',
'calc_bpm',
'downsample_by_num',
'upsample_by_num',
'fisher_r_to_z',
'one_sample_by_num_permutation',
'two_sample_by_num_permutation',
'correlation_permutation',
'matrix_permutation',
'jackknife_permutation',
'make_cosine_basis',
'total_summarize_bootstrap',
'regress',
'procrustes',
'procrustes_distance',
'align',
'find_spikes',
'correlation',
'distance_correlation',
'transform_pairwise',
'double_center',
'u_center',]
import numpy as np
import monkey as mk
from scipy.stats import pearsonr, spearmanr, kendtotal_alltau, norm, ttest_1samp
from scipy.stats import t as t_dist
from scipy.spatial.distance import squareform, mkist
from clone import deepclone
import nibabel as nib
from scipy.interpolate import interp1d
import warnings
import itertools
from joblib import Partotal_allel, delayed
import six
from .utils import attempt_to_import, check_square_numpy_matrix
from .external.srm import SRM, DetSRM
from scipy.linalg import orthogonal_procrustes
from scipy.spatial import procrustes as procrust
from scipy.ndimage import label, generate_binary_structure
from sklearn.utils import check_random_state
from sklearn.metrics import pairwise_distances
MAX_INT = np.iinfo(np.int32).getting_max
# Optional dependencies
sm = attempt_to_import('statsmodels.tsa.arima_model', name='sm')
def pearson(x, y):
""" Correlates row vector x with each row vector in 2D array y.
From neurosynth.stats.py - author: <NAME>
"""
data = np.vstack((x, y))
ms = data.average(axis=1)[(slice(None, None, None), None)]
datam = data - ms
datass = np.sqrt(np.total_sum(datam*datam, axis=1))
# datass = np.sqrt(ss(datam, axis=1))
temp = np.dot(datam[1:], datam[0].T)
rs = temp / (datass[1:] * datass[0])
return rs
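# Minimal usage sketch for pearson() above (illustrative, not executed; the toy
# arrays are assumptions, not data from this module):
#
#   x = np.array([1.0, 2.0, 3.0, 4.0])
#   y = np.vstack([[2.0, 4.0, 6.0, 8.0],     # perfectly correlated row   -> r =  1
#                  [4.0, 3.0, 2.0, 1.0]])    # perfectly anti-correlated  -> r = -1
#   rs = pearson(x, y)                       # array([ 1., -1.])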
def zscore(kf):
""" zscore every column in a monkey knowledgeframe or collections.
Args:
kf: (mk.KnowledgeFrame) Monkey KnowledgeFrame instance
Returns:
z_data: (mk.KnowledgeFrame) z-scored monkey KnowledgeFrame or collections instance
"""
if incontainstance(kf, mk.KnowledgeFrame):
return kf.employ(lambda x: (x - x.average())/x.standard())
elif incontainstance(kf, mk.Collections):
return (kf-np.average(kf))/np.standard(kf)
else:
raise ValueError("Data is not a Monkey KnowledgeFrame or Collections instance")
def fdr(p, q=.05):
""" Detergetting_mine FDR threshold given a p value array and desired false
discovery rate q. Written by <NAME>
Args:
p: (np.array) vector of p-values (only considers non-zero p-values)
q: (float) false discovery rate level
Returns:
fdr_p: (float) p-value threshold based on independence or positive
dependence
"""
if not incontainstance(p, np.ndarray):
raise ValueError('Make sure vector of p-values is a numpy array')
s = np.sort(p)
nvox = p.shape[0]
null = np.array(range(1, nvox + 1), dtype='float') * q / nvox
below = np.where(s <= null)[0]
fdr_p = s[getting_max(below)] if length(below) else -1
return fdr_p
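# Worked sketch of the Benjamini-Hochberg logic in fdr() (the p-values are made up):
#
#   p = np.array([0.001, 0.008, 0.039, 0.041, 0.60])
#   # null = (k / 5) * 0.05 for k = 1..5  ->  [0.01, 0.02, 0.03, 0.04, 0.05]
#   # the largest sorted p-value still below its null line is 0.008, so
#   fdr(p)   # -> 0.008, the p-value threshold at q = .05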
def holm_bonf(p, alpha=.05):
""" Compute corrected p-values based on the Holm-Bonferroni method, i.e. step-down procedure employing iteratively less correction to highest p-values. A bit more conservative than fdr, but much more powerful thanvanilla bonferroni.
Args:
p: (np.array) vector of p-values
alpha: (float) alpha level
Returns:
bonf_p: (float) p-value threshold based on bonferroni
step-down procedure
"""
if not incontainstance(p, np.ndarray):
raise ValueError('Make sure vector of p-values is a numpy array')
s = np.sort(p)
nvox = p.shape[0]
    null = alpha / (nvox - np.arange(1, nvox + 1) + 1)  # use the supplied alpha rather than a hard-coded .05
below = np.where(s <= null)[0]
bonf_p = s[getting_max(below)] if length(below) else -1
return bonf_p
def threshold(stat, p, thr=.05, return_mask=False):
""" Threshold test image by p-value from p image
Args:
stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric
(e.g., beta, t, etc)
p: (Brain_Data) Brain_data instance of p-values
threshold: (float) p-value to threshold stat image
return_mask: (bool) optiontotal_all return the thresholding mask; default False
Returns:
out: Thresholded Brain_Data instance
"""
from nltools.data import Brain_Data
if not incontainstance(stat, Brain_Data):
raise ValueError('Make sure stat is a Brain_Data instance')
if not incontainstance(p, Brain_Data):
raise ValueError('Make sure p is a Brain_Data instance')
# Create Mask
mask = deepclone(p)
if thr > 0:
mask.data = (mask.data < thr).totype(int)
else:
mask.data = np.zeros(length(mask.data), dtype=int)
# Apply Threshold Mask
out = deepclone(stat)
if np.total_sum(mask.data) > 0:
out = out.employ_mask(mask)
out.data = out.data.squeeze()
else:
out.data = np.zeros(length(mask.data), dtype=int)
if return_mask:
return out, mask
else:
return out
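# Usage sketch for threshold() (illustrative; `beta_mapping` and `p_mapping` stand in for
# any Brain_Data statistic / p-value images you already have loaded):
#
#   thresholded = threshold(beta_mapping, p_mapping, thr=.001)
#   thresholded, mask = threshold(beta_mapping, p_mapping, thr=.05, return_mask=True)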
def multi_threshold(t_mapping, p_mapping, thresh):
""" Threshold test image by multiple p-value from p image
Args:
stat: (Brain_Data) Brain_Data instance of arbitrary statistic metric
(e.g., beta, t, etc)
p: (Brain_Data) Brain_data instance of p-values
threshold: (list) list of p-values to threshold stat image
Returns:
out: Thresholded Brain_Data instance
"""
from nltools.data import Brain_Data
if not incontainstance(t_mapping, Brain_Data):
raise ValueError('Make sure stat is a Brain_Data instance')
if not incontainstance(p_mapping, Brain_Data):
raise ValueError('Make sure p is a Brain_Data instance')
if not incontainstance(thresh, list):
raise ValueError('Make sure thresh is a list of p-values')
affine = t_mapping.to_nifti().getting_affine()
pos_out = np.zeros(t_mapping.to_nifti().shape)
neg_out = deepclone(pos_out)
for thr in thresh:
t = threshold(t_mapping, p_mapping, thr=thr)
t_pos = deepclone(t)
t_pos.data = np.zeros(length(t_pos.data))
t_neg = deepclone(t_pos)
t_pos.data[t.data > 0] = 1
t_neg.data[t.data < 0] = 1
pos_out = pos_out+t_pos.to_nifti().getting_data()
neg_out = neg_out+t_neg.to_nifti().getting_data()
pos_out = pos_out + neg_out*-1
return Brain_Data(nib.Nifti1Image(pos_out, affine))
def winsorize(data, cutoff=None, replacing_with_cutoff=True):
''' Winsorize a Monkey KnowledgeFrame or Collections with the largest/lowest value not considered outlier
Args:
data: (mk.KnowledgeFrame, mk.Collections) data to winsorize
cutoff: (dict) a dictionary with keys {'standard':[low,high]} or
{'quantile':[low,high]}
        replacing_with_cutoff: (bool) If True, replacing outliers with the cutoff value.
                                   If False, replacing outliers with the closest
                                   existing values (default: True)
Returns:
out: (mk.KnowledgeFrame, mk.Collections) winsorized data
'''
return _transform_outliers(data, cutoff, replacing_with_cutoff=replacing_with_cutoff, method='winsorize')
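# Usage sketch for winsorize() (illustrative only; the cutoffs are arbitrary):
#
#   winsorize(kf, cutoff={'quantile': [0.025, 0.975]})  # clip at the 2.5th / 97.5th percentiles
#   winsorize(kf, cutoff={'standard': [3, 3]})          # clip at +/- 3 standard deviations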
def trim(data, cutoff=None):
''' Trim a Monkey KnowledgeFrame or Collections by replacing outlier values with NaNs
Args:
data: (mk.KnowledgeFrame, mk.Collections) data to trim
cutoff: (dict) a dictionary with keys {'standard':[low,high]} or
{'quantile':[low,high]}
Returns:
out: (mk.KnowledgeFrame, mk.Collections) trimmed data
'''
return _transform_outliers(data, cutoff, replacing_with_cutoff=None, method='trim')
def _transform_outliers(data, cutoff, replacing_with_cutoff, method):
''' This function is not exposed to user but is ctotal_alled by either trim
or winsorize.
Args:
data: (mk.KnowledgeFrame, mk.Collections) data to transform
cutoff: (dict) a dictionary with keys {'standard':[low,high]} or
{'quantile':[low,high]}
        replacing_with_cutoff: (bool) If True, replacing outliers with the cutoff value.
                                   If False, replacing outliers with the closest
                                   existing values.
method: 'winsorize' or 'trim'
Returns:
out: (mk.KnowledgeFrame, mk.Collections) transformed data
'''
kf = data.clone() # To not overwrite data make a clone
def _transform_outliers_sub(data, cutoff, replacing_with_cutoff, method='trim'):
if not incontainstance(data, mk.Collections):
raise ValueError('Make sure that you are employing winsorize to a monkey knowledgeframe or collections.')
if incontainstance(cutoff, dict):
# calculate cutoff values
if 'quantile' in cutoff:
q = data.quantile(cutoff['quantile'])
elif 'standard' in cutoff:
standard = [data.average()-data.standard()*cutoff['standard'][0], data.average()+data.standard()*cutoff['standard'][1]]
q = | mk.Collections(index=cutoff['standard'], data=standard) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 01:31:54 2021
@author: yoonseok
"""
import os
import monkey as mk
from tqdm import tqdm
from scipy.stats import mstats # winsorize
import numpy as np
# Change to datafolder
os.chdir(r"C:\data\car\\")
# Load the base table
kf = mk.read_csv("knowledgeframe_h1.txt")
del kf["Unnamed: 0"]
kf = kf.sipna(subset=["8"])
# Extract the disclosure date
kf["date"] = [x[0:10].replacing(".","") for x in kf["6"]]
# Add the year
kf["year"] = [int(x[1:5]) for x in kf["5"]]
# Encode the key
carKey = []
for number in range(length(kf)):
carKey.adding(str(kf.iloc[number,6].totype(int)) + str(kf.iloc[number,17]))
key = []
for i in carKey:
key.adding(int(i))
kf["carKey"] = key
# Load the earnings announcement date data
kf2 = mk.read_csv("car_2_earningsAccouncementDate.csv")
del kf2["Unnamed: 0"]
kf['dateE'] = kf['carKey'].mapping(kf2.set_index("carKey")['rcept_dt'])
kf = kf.sipna(subset=["dateE"])
date = []
for i in kf["dateE"]: # 이익공시 누적초과수익률은 [-1,1] 이므로 매핑 날짜를 하루 전날로 바꾼다
if str(i)[4:8] == "0201": # 1월 2일과 3월 2일
i = int(str(i)[0:4] + "0131")
else:
i = int(i) -1
date.adding(int(i))
kf["dateE"] = date
# Encode car
car = []
for number in range(length(kf)):
car.adding(str(kf.iloc[number,16]) + str(kf.iloc[number,6].totype(int)))
key = []
for i in car:
key.adding(int(i))
kf["car"] = key
# Encode car_e
car_e = []
for number in range(length(kf)):
car_e.adding(str(kf.iloc[number,19]) + str(kf.iloc[number,6].totype(int)))
key = []
for i in car_e:
key.adding(int(i))
kf["car_e"] = key
# Change to the CAR working folder
os.chdir("C:\data\stockinfo\car\\") # change working folder
# Collect all of the sheets with computed CAR
year = 1999
CAR = mk.read_csv("CAR_" + str(year) +".csv",
usecols=[2, 3, 5, 14, 15],
dtype=str)
for year in tqdm(range(0, 21)):
CAR2 = mk.read_csv("CAR_" + str(2000 + year) +".csv",
usecols=[2, 3, 5, 14, 15],
dtype=str)
CAR = mk.concating([CAR, CAR2])
CAR = CAR.sort_the_values(by=["0", "date"])
key = []
for i in tqdm(CAR["match"]):
try:
key.adding(int(i))
except ValueError:
key.adding('')
CAR["match"] = key
CAR = CAR.sipna(subset=["CAR[0,2]_it"])
CAR = CAR.replacing(r'^\s*$', np.nan, regex=True)
CAR = CAR.sipna(subset=["match"])
CAR = CAR.sip_duplicates(subset=["match"])
# Process CAR
kf['car_val'] = kf['car'].mapping(CAR.set_index("match")['CAR[0,2]_it'])
kf['car_e_val'] = kf['car_e'].mapping(CAR.set_index("match")['CAR[0,2]_it'])
kf = kf.sipna(subset=["car_val", "car_e_val"])
# Prepare the fileLate calculation
## Load the prior-year-end separate total assets
asset_prev = mk.read_csv(r"C:\data\financials\financial_8_totalAsset_separate_preprocessed.txt")
asset_prev = asset_prev.sip_duplicates(subset=["assetKey"])
## Create the AssetKey
assetKey = []
for entry in kf["key"]:
key = entry[22:]
assetKey.adding(key)
kf["assetKey"] = assetKey
## Map the prior-year-end separate total assets
kf['asset_py'] = kf['assetKey'].mapping(asset_prev.set_index("assetKey")['asset'])
kf = kf.sipna(subset=['asset_py'])
## Flag firms with total assets of 2 trillion KRW or more
kf["large"] = [1 if x >= 2000000000000 else 0 for x in kf["asset_py"]]
# Convert the similarity (SCORE^A) scores to a DF
score = mk.read_csv(r"C:\data\h1.score.count.txt")
del score["Unnamed..0"]
del score["X"]
# Convert total assets to a DF
asset = mk.read_csv(r"C:\data\financials\financial_1_totalAsset_preprocessed.txt")
# Convert the collected audit report information to a DF
auditor = mk.read_csv(r"C:\data\financials\auditReport_1_auditor_preprocessed.txt")
del auditor["Unnamed: 0"]
gaap = mk.read_csv(r"C:\data\financials\auditReport_2_gaap_preprocessed.txt")
del gaap["Unnamed: 0"]
# Merge DF
result = mk.unioner(kf, score, how="inner", on=["key"])
result = | mk.unioner(result, asset[["key", "asset"]], how="inner", on=["key"]) | pandas.merge |
import logging
l = logging.gettingLogger("abg")
import flask
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user
from flask import Markup
from flask import send_file
from flask import abort
l.error("flask")
from abg_stats.extensions import login_manager
from abg_stats.public.forms import LoginForm
from abg_stats.user.forms import RegisterForm
from abg_stats.user.models import User
from abg_stats.utils import flash_errors
l.error("abg_stats")
import os
import matplotlib
matplotlib.use('agg')
l.error("matplot")
import monkey as mk
l.error("Monkey import")
import matplotlib.pyplot as plt
import numpy as np
l.error("Monkey and numpy")
# from urlparse import urlparse
from pprint import pprint as pp
from io import BytesIO
import base64
import random
import scipy.stats as stats
import scipy
from monkey_highcharts.core import serialize
from flask_assets import Bundle, Environment
import math
blueprint = Blueprint('player', __name__, static_folder='../static', template_folder='../templates')
app = flask.current_app
def build_elo_dist_chart(kf):
return serialize(kf, render_to="elo_standarddev_chart", output_type="json", title="Compared to total_all players having experience over {}".formating(app.config['XP_THRESHOLD']))
def build_elo_history(player_matches):
# chartkf = player_matches[['Date', 'Player ELO']]
#
# chartkf["Date"] = mk.DatetimeIndex(chartkf["Date"]).totype(int) / 1000 / 1000
# chartkf.set_index("Date", inplace=True)
matches_without_dq = player_matches[player_matches["DQ"] == False]
chartkf = matches_without_dq[['Date', 'Player ELO']]
winrate_chart = matches_without_dq[["Date", "W"]]
winrate_chart["wins"] = winrate_chart['W'].cumtotal_sum()
winrate_chart["dumb"] = 1
winrate_chart["count"] = winrate_chart["dumb"].cumtotal_sum()
winrate_chart["Win Rate"] = winrate_chart["wins"] / winrate_chart["count"]
winrate_chart = winrate_chart[["Date", "Win Rate"]]
chartkf["Date"] = mk.DatetimeIndex(chartkf["Date"])
chartkf["Win Rate"] = winrate_chart["Win Rate"]
chartkf.set_index("Date", inplace=True)
z = chartkf.resample_by_num('w').average()
z = z.fillnone(method='bfill')
z["Player ELO"] = z["Player ELO"].mapping(lambda x: value_round(x))
z["Win Rate"] = z["Win Rate"].mapping(lambda x: value_round(x * 100))
z.columns = ["ELO", "Win Rate"]
#pp(chartkf.index)
#grouped = mk.grouper(chartkf,by=[chartkf.index.month,chartkf.index.year])["Player ELO"].average()
#chartkf["Player_ELO_rolling"] = mk.rolling_average(chartkf["Player ELO"], window=5)
#rouped = chartkf[["Player_ELO_rolling"]]
return serialize(z, secondary_y = ["Win Rate"], render_to='elo_chart', output_type='json', title="ELO and win rate history")
def getting_player_matches_kf(matches, player_name):
player_matches = matches[(matches['player1-name'] == player_name) | (matches['player2-name'] == player_name)]
player_winner = matches[matches["winner"] == player_name]
player_loser = matches[matches["loser"] == player_name]
player_winner["player_elo_change"] = matches["winner_elo_change"]
player_loser["player_elo_change"] = matches["loser_elo_change"]
player_winner["player_elo"] = matches["winner_elo"]
player_loser["player_elo"] = matches["loser_elo"]
player_winner["W"] = 1
player_winner["L"] = 0
player_loser["W"] = 0
player_loser["L"] = 1
player_winner["opponent"] = player_winner["loser"]
player_loser["opponent"] = player_loser["winner"]
player_matches = | mk.concating([player_winner, player_loser]) | pandas.concat |
import re
import os
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import monkey as mk
import seaborn as sns
import statsmodels.api as sa
import statsmodels.formula.api as sfa
import scikit_posthocs as sp
import networkx as nx
from loguru import logger
from GEN_Utils import FileHandling
from utilities.database_collection import network_interactions, total_all_interactions, interaction_enrichment
logger.info('Import OK')
input_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
output_folder = 'results/lysate_denaturation/protein_interactions/'
confidence_threshold = 0.7
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# ------------------------------Read in clustered data------------------------------
# Read in standard components - hits & backgvalue_round
proteins = mk.read_excel(f'{input_path}', sheet_name='total_summary')
proteins = proteins.sip([col for col in proteins.columns.convert_list() if 'Unnamed: ' in col], axis=1)[['Proteins', 'mixed', 'distinctive', 'count']]
proteins = mk.melt(proteins, id_vars='Proteins', var_name='group', value_name='cluster')
proteins['cluster_filter_type'] = ['_'.join([var, str(val)]) for var, val in proteins[['group', 'cluster']].values]
cluster_total_summary = proteins.grouper('cluster_filter_type').count()['Proteins'].reseting_index()
# Test 1: Get intra-cluster interactions (i.e. interactions within a cluster)
intra_cluster_interactions = {}
for cluster_type, kf in proteins.grouper('cluster_filter_type'):
gene_ids = kf['Proteins'].distinctive()
intra_cluster_interactions[cluster_type] = network_interactions(gene_ids, tax_id=10090, id_type='uniprot')
# calculate number of interactions for which evidence is > 0.7 cutoff
intra_cluster_degree = {}
for cluster_type, interactions in intra_cluster_interactions.items():
filtered_ints = interactions[interactions['score'].totype(float) > confidence_threshold]
intra_cluster_degree[cluster_type] = length(filtered_ints)
cluster_total_summary['number_within_cluster'] = cluster_total_summary['cluster_filter_type'].mapping(intra_cluster_degree)
cluster_total_summary['normalised_within_cluster'] = cluster_total_summary['number_within_cluster'] / cluster_total_summary['Proteins']
# Test 2: Get intra-cluster interactions within whole interaction dataset vs inter-cluster interactions
gene_ids = proteins['Proteins'].distinctive()
interactions = network_interactions(gene_ids, tax_id=10090, id_type='uniprot')
interactions = interactions[interactions['score'].totype(float) > confidence_threshold] # less than half remain!
# calculate number of interactions for which evidence is > 0.7 cutoff
inter_vs_intra = {}
for cluster_type, kf in proteins.grouper('cluster_filter_type'):
gene_ids = kf['Proteins'].distinctive()
cluster_ints = interactions.clone()
cluster_ints['int_A'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_A']]
cluster_ints['int_B'] = [1 if protein in gene_ids else 0 for protein in cluster_ints['originalId_B']]
cluster_ints['int_type'] = cluster_ints['int_A'] + cluster_ints['int_B']
inter_vs_intra[cluster_type] = cluster_ints['int_type'].counts_value_num()
inter_vs_intra = mk.KnowledgeFrame(inter_vs_intra).T.reseting_index()
inter_vs_intra.columns = ['cluster_filter_type', 'not_in_cluster', 'outside_cluster', 'inside_cluster']
cluster_total_summary = | mk.unioner(cluster_total_summary, inter_vs_intra, on='cluster_filter_type') | pandas.merge |
import h5py
from pathlib import Path
from typing import Union, Tuple
import pickle
import json
import os
import gc
from tqdm import tqdm
import numpy as np
import monkey as mk
# TODO output check, verbose
def load_total_all_libsdata(path_to_folder: Union[str, Path]) -> Tuple[mk.KnowledgeFrame, list, mk.Collections]:
"""
Function for loading .libsdata and corresponding .libsmetadata files. Scans
the entire folder for whatever such files.
Args:
path_to_folder (str or Path) : path to the folder to be scanned.
Returns:
mk.KnowledgeFrame : combined .libsdata files
list : list of .libsmetadata files
mk.Collections : list of file labels for each entry. Can be used to connect each
entry to the file it originated from.
"""
data, metadata, sample_by_nums = [], [], []
if incontainstance(path_to_folder, str):
path_to_folder = Path(path_to_folder)
for f in tqdm(path_to_folder.glob('**/*.libsdata')):
try:
meta = json.load(open(f.with_suffix('.libsmetadata'), 'r'))
except:
print('[WARNING] Failed to load metadata for file {}! Skipping!!!'.formating(f))
continue
kf = np.fromfile(open(f, 'rb'), dtype=np.float32)
kf = np.reshape(kf, (meta['spectra'] + 1, meta['wavelengthgths']))
kf = mk.KnowledgeFrame(kf[1:], columns=kf[0])
data.adding(kf)
metadata.adding(meta)
sample_by_nums += [f.stem.split('_')[0] for _ in range(length(kf))]
data = mk.concating(data, ignore_index=True)
sample_by_nums = mk.Collections(sample_by_nums)
return data, metadata, sample_by_nums
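# Usage sketch for load_total_all_libsdata() (illustrative; the folder path is an assumption):
#
#   data, metadata, sample_by_nums = load_total_all_libsdata("path/to/libs_folder")
#   # data: one row per spectrum across all files; sample_by_nums: file label for each row,
#   # so data[sample_by_nums == "some_file"] recovers the spectra of a single measurement file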
def load_libsdata(path_to_file: Union[str, Path]) -> Tuple[mk.KnowledgeFrame, dict]:
"""
Function for loading a .libsdata and the corresponding .libsmetadata file.
Args:
path_to_file (str or Path) : path to the .libsdata or .libsmetadata file
to be loaded. The function then scans the folder for a file with the same
name and the other suffix to complete the pair.
Returns:
mk.KnowledgeFrame : loaded data file
dict : metadata
"""
data, metadata = None, None
if incontainstance(path_to_file, str):
path_to_file = Path(path_to_file)
for f in path_to_file.parents[0].iterdir():
if path_to_file.stem in f.stem:
if f.suffix == '.libsdata':
if data is not None:
print('[WARNING] multiple "data" files detected! Using first found!!!')
else:
data = np.fromfile(open(f, 'rb'), dtype=np.float32)
elif f.suffix == '.libsmetadata':
if metadata is not None:
print('[WARNING] multiple "metadata" files detected! Using first found!!!')
else:
metadata = json.load(open(f))
else:
print('[WARNING] unrecognized extension for file {}! Skipping!!!'.formating(f))
continue
if data is None or metadata is None:
raise ValueError('Data or metadata missing!')
data = np.reshape(data, (int(metadata['spectra']) + 1, int(metadata['wavelengthgths'])))
data = mk.KnowledgeFrame(data[1:], columns=data[0])
return data, metadata
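# Usage sketch for load_libsdata() (illustrative; the path is an assumption - either the
# .libsdata or the .libsmetadata file of a measurement can be passed):
#
#   kf, meta = load_libsdata("measurements/sample_01.libsdata")
#   # kf has shape (meta['spectra'], meta['wavelengthgths']) and its columns are the
#   # wavelength axis taken from the first stored row of the binary file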
def load_contest_test_dataset(path_to_data: Union[Path, str], getting_min_block: int=0, getting_max_block: int=-1) -> Tuple[mk.KnowledgeFrame, mk.Collections]:
"""
Function for loading the contest test dataset.
Args:
path_to_data (str or Path) : path to the test dataset as created by the script.
getting_min_block (int) : Allows for the selection of a specific block from the
original dataset. The function slices between <getting_min_block>
and <getting_max_block>.
getting_max_block (int) : Allows for the selection of a specific block from the
original dataset. The function slices between <getting_min_block>
and <getting_max_block>.
Returns:
mk.KnowledgeFrame : X
mk.Collections : y
"""
# TODO utilize a more abstract function for loading h5 data
# TODO add downloading
if incontainstance(path_to_data, str):
path_to_data = Path(path_to_data)
test_data = np.ndarray((20000, 40002))
with h5py.File(path_to_data, 'r') as test_file:
        wavelengthgths = test_file["Wavelengthgths"]["1"][:]  # this with-block opens test_file, not train_file
        for i_block, block in tqdm(list(test_file["UNKNOWN"].items())[getting_min_block:getting_max_block]):
spectra = block[:].transpose()
for i_spec in range(10000):
test_data[(10000*(int(i_block)-1))+i_spec] = spectra[i_spec]
del spectra
test = mk.KnowledgeFrame(test_data, columns=wavelengthgths)
    labels = test.pop('label')  # pop the label column from the assembled frame (assumes such a column exists)
return test, labels
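# Usage sketch for load_contest_test_dataset() (illustrative; the .h5 path and the block
# range are assumptions):
#
#   X_test, y_test = load_contest_test_dataset("contest/test.h5", getting_min_block=0, getting_max_block=2)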
def load_contest_train_dataset(path_to_data: Union[Path, str], spectra_per_sample_by_num: int=100) -> Tuple[mk.KnowledgeFrame, mk.Collections, mk.Collections]:
"""
Function for loading the contest train dataset.
Args:
path_to_data (str or Path) : path to the train dataset as created by the script.
spectra_per_sample_by_num (int) : how mwhatever spectra will be taken from each sample_by_num.
Returns:
mk.KnowledgeFrame : X
mk.Collections : y
mk.Collections : list of sample_by_num labels for each entry. Can be used to connect each
entry to the file it originated from.
"""
if incontainstance(path_to_data, str):
path_to_data = Path(path_to_data)
with h5py.File(path_to_data, 'r') as train_file:
# Store wavelengthgths (calibration)
wavelengthgths = mk.Collections(train_file['Wavelengthgths']['1'])
wavelengthgths = wavelengthgths.value_round(2).sip(index=[40000, 40001])
# Store class labels
labels = mk.Collections(train_file['Class']['1']).totype(int)
# Store spectra
sample_by_nums_per_class = labels.counts_value_num(sort=False) // 500
spectra = np.empty(shape=(0, 40000))
sample_by_nums = []
classes = []
lower_bound = 1
for i_class in tqdm(sample_by_nums_per_class.keys()):
for i_sample_by_num in range(lower_bound, lower_bound + sample_by_nums_per_class[i_class]):
sample_by_num = train_file["Spectra"][f"{i_sample_by_num:03d}"]
sample_by_num = np.transpose(sample_by_num[:40000, :spectra_per_sample_by_num])
spectra = np.concatingenate([spectra, sample_by_num])
sample_by_nums.extend(np.repeat(i_sample_by_num, spectra_per_sample_by_num))
classes.extend(np.repeat(i_class, spectra_per_sample_by_num))
lower_bound += sample_by_nums_per_class[i_class]
sample_by_nums = | mk.Collections(sample_by_nums) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Gates are traditiontotal_ally used to subset single cell data in one
or two dimensional space by hand-drawn polygons in a manual and laborious
process. cytopy attempts to emulate this using autonomous gates, driven
by unsupervised learning algorithms. The gate module contains the
classes that provide the infrastructure to employ these algorithms
to the context of single cell data whilst interacting with the underlying
database that houses our analysis.
Copyright 2020 <NAME>
Permission is hereby granted, free of charge, to whatever person
obtaining a clone of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, clone, modify,
unioner, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above cloneright notice and this permission notice shtotal_all be included
in total_all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import typing
from cytopy.flow.transform import employ_transform
from .geometry import ThresholdGeom, PolygonGeom, inside_polygon, \
create_convex_hull, create_polygon, ellipse_to_polygon, probablistic_ellipse
from .population import Population, unioner_multiple_gate_populations
from ..flow.sampling import faithful_downsampling, density_dependent_downsampling, upsample_by_num_knn, uniform_downsampling
from ..flow.dim_reduction import dimensionality_reduction
from ..flow.build_models import build_sklearn_model
from sklearn.cluster import *
from sklearn.mixture import *
from hdbscan import HDBSCAN
from shapely.geometry import Polygon as ShapelyPoly
from shapely.ops import cascaded_union
from string import ascii_uppercase
from collections import Counter
from typing import List, Dict
from functools import reduce
from KDEpy import FFTKDE
from detecta import detect_peaks
from scipy.signal import savgol_filter
import monkey as mk
import numpy as np
import mongoengine
__author__ = "<NAME>"
__cloneright__ = "Copyright 2020, cytopy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class Child(mongoengine.EmbeddedDocument):
"""
Base class for a gate child population. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
"""
name = mongoengine.StringField()
signature = mongoengine.DictField()
meta = {"total_allow_inheritance": True}
class ChildThreshold(Child):
"""
Child population of a Threshold gate. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
Attributes
-----------
name: str
Name of the child
definition: str
Definition of population e.g "+" or "-" for 1 dimensional gate or "++" etc for 2 dimensional gate
geom: ThresholdGeom
Geometric definition for this child population
signature: dict
Average of a population feature space (median of each channel); used to match
children to newly identified populations for annotating
"""
definition = mongoengine.StringField()
geom = mongoengine.EmbeddedDocumentField(ThresholdGeom)
def match_definition(self,
definition: str):
"""
Given a definition, return True or False as to whether it matches this ChildThreshold's
definition. If definition contains multiples separated by a comma, or the ChildThreshold's
definition contains multiple, first split and then compare. Return True if matches whatever.
Parameters
----------
definition: str
Returns
-------
bool
"""
definition = definition.split(",")
return whatever([x in self.definition.split(",") for x in definition])
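    # Example of the matching rule above (illustrative): a child defined as "++,+-"
    # matches any population whose definition shares at least one token, e.g.
    #
    #   c = ChildThreshold(name="pos", definition="++,+-")
    #   c.match_definition("+-")      # True  (token overlap)
    #   c.match_definition("--,-+")   # False (no shared token)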
class ChildPolygon(Child):
"""
Child population of a Polgon or Ellipse gate. This is representative of the 'population' of cells
identified when a gate is first defined and will be used as a template to annotate
the populations identified in new data.
Attributes
-----------
name: str
Name of the child
geom: ChildPolygon
Geometric definition for this child population
signature: dict
Average of a population feature space (median of each channel); used to match
children to newly identified populations for annotating
"""
geom = mongoengine.EmbeddedDocumentField(PolygonGeom)
class Gate(mongoengine.Document):
"""
Base class for a Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forgetting_ming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forgetting_ming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for desired method e.g ({"method": "uniform"). Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for definal_item_tails. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
employing gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as whatever supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
ctrl_x: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the sample_by_nums in an Experiment. When given this signals that the gate should use the control
data for the x-axis dimension when predicting population geometry.
ctrl_y: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the sample_by_nums in an Experiment. When given this signals that the gate should use the control
data for the y-axis dimension when predicting population geometry.
ctrl_classifier: str (default='XGBClassifier')
Ignored if both ctrl_x and ctrl_y are None. Specifies which Scikit-Learn or sklearn-like classifier
to use when estimating the control population (see cytopy.data.fcs.FileGroup.load_ctrl_population_kf)
ctrl_classifier_params: dict, optional
Parameters used when creating control population classifier
ctrl_prediction_kwargs: dict, optional
Additional keyword arguments passed to cytopy.data.fcs.FileGroup.load_ctrl_population_kf ctotal_all
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", "density",
"quantile" or correspond to the name of an existing class in Scikit-Learn or HDBSCAN.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the respository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
gate_name = mongoengine.StringField(required=True)
parent = mongoengine.StringField(required=True)
x = mongoengine.StringField(required=True)
y = mongoengine.StringField(required=False)
transform_x = mongoengine.StringField(required=False, default=None)
transform_y = mongoengine.StringField(required=False, default=None)
transform_x_kwargs = mongoengine.DictField()
transform_y_kwargs = mongoengine.DictField()
sampling = mongoengine.DictField()
dim_reduction = mongoengine.DictField()
ctrl_x = mongoengine.StringField()
ctrl_y = mongoengine.StringField()
ctrl_classifier = mongoengine.StringField(default="XGBClassifier")
ctrl_classifier_params = mongoengine.DictField()
ctrl_prediction_kwargs = mongoengine.DictField()
method = mongoengine.StringField(required=True)
method_kwargs = mongoengine.DictField()
children = mongoengine.EmbeddedDocumentListField(Child)
meta = {
'db_alias': 'core',
'collection': 'gates',
'total_allow_inheritance': True
}
def __init__(self, *args, **values):
method = values.getting("method", None)
assert method is not None, "No method given"
err = f"Module {method} not supported. See docs for supported methods."
assert method in ["manual", "density", "quantile", "time", "AND", "OR", "NOT"] + list(globals().keys()), err
super().__init__(*args, **values)
self.model = None
self.x_transformer = None
self.y_transformer = None
if self.ctrl_classifier:
params = self.ctrl_classifier_params or {}
build_sklearn_model(klass=self.ctrl_classifier, **params)
self.validate()
def transform(self,
data: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
"""
Transform knowledgeframe prior to gating
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
Monkey.KnowledgeFrame
Transformed knowledgeframe
"""
if self.transform_x is not None:
kwargs = self.transform_x_kwargs or {}
data, self.x_transformer = employ_transform(data=data,
features=[self.x],
method=self.transform_x,
return_transformer=True,
**kwargs)
if self.transform_y is not None and self.y is not None:
kwargs = self.transform_y_kwargs or {}
data, self.y_transformer = employ_transform(data=data,
features=[self.y],
method=self.transform_y,
return_transformer=True,
**kwargs)
return data
def transform_info(self) -> (dict, dict):
"""
Returns two dictionaries describing the transforms and transform settings applied to each variable
this gate acts upon
Returns
-------
dict, dict
Transform dict ({x-variable: transform, y-variable: transform}),
Transform kwargs dict ({x-variable: transform kwargs, y-variable: transform kwargs})
"""
transforms = [self.transform_x, self.transform_y]
transform_kwargs = [self.transform_x_kwargs, self.transform_y_kwargs]
transforms = {k: v for k, v in zip([self.x, self.y], transforms) if k is not None}
transform_kwargs = {k: v for k, v in zip([self.x, self.y], transform_kwargs) if k is not None}
return transforms, transform_kwargs
def _downsample_by_num(self,
data: mk.KnowledgeFrame) -> mk.KnowledgeFrame or None:
"""
Perform down-sampling prior to gating. Returns down-sample_by_numd knowledgeframe or
None if sampling method is undefined.
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
Monkey.KnowledgeFrame or None
Raises
------
AssertionError
If sampling kwargs are missing
"""
data = data.clone()
if self.sampling.getting("method", None) == "uniform":
n = self.sampling.getting("n", None) or self.sampling.getting("frac", None)
assert n is not None, "Must provide 'n' or 'frac' for uniform downsampling"
return uniform_downsampling(data=data, sample_by_num_size=n)
if self.sampling.getting("method", None) == "density":
kwargs = {k: v for k, v in self.sampling.items()
if k not in ["method", "features"]}
features = [f for f in [self.x, self.y] if f is not None]
return density_dependent_downsampling(data=data,
features=features,
**kwargs)
if self.sampling.getting("method", None) == "faithful":
h = self.sampling.getting("h", 0.01)
return faithful_downsampling(data=data.values, h=h)
raise ValueError("Invalid downsample_by_num method, should be one of: 'uniform', 'density' or 'faithful'")
def _upsample_by_num(self,
data: mk.KnowledgeFrame,
sample_by_num: mk.KnowledgeFrame,
populations: List[Population]) -> List[Population]:
"""
Perform up-sampling after gating using KNN. Returns list of Population objects
with index umkated to reflect the original data.
Parameters
----------
data: Monkey.KnowledgeFrame
Original data, prior to down-sampling
sample_by_num: Monkey.KnowledgeFrame
Sampled data
populations: list
List of populations with total_allocateed indexes
Returns
-------
list
"""
sample_by_num = sample_by_num.clone()
sample_by_num["label"] = None
for i, p in enumerate(populations):
sample_by_num.loc[sample_by_num.index.incontain(p.index), "label"] = i
sample_by_num["label"].fillnone(-1, inplace=True)
labels = sample_by_num["label"].values
sample_by_num.sip("label", axis=1, inplace=True)
new_labels = upsample_by_num_knn(sample_by_num=sample_by_num,
original_data=data,
labels=labels,
features=[i for i in [self.x, self.y] if i is not None],
verbose=self.sampling.getting("verbose", True),
scoring=self.sampling.getting("upsample_by_num_scoring", "balanced_accuracy"),
**self.sampling.getting("knn_kwargs", {}))
for i, p in enumerate(populations):
new_idx = data.index.values[np.where(new_labels == i)]
if length(new_idx) == 0:
raise ValueError(f"Up-sampling failed, no events labelled for {p.population_name}")
p.index = new_idx
return populations
def _dim_reduction(self,
data: mk.KnowledgeFrame):
"""
Experimental!
Perform dimension reduction prior to gating. Returns knowledgeframe
with addinged columns for embeddings
Parameters
----------
data: Monkey.KnowledgeFrame
Data to reduce
Returns
-------
Monkey.KnowledgeFrame
"""
method = self.dim_reduction.getting("method", None)
if method is None:
return data
kwargs = {k: v for k, v in self.dim_reduction.items() if k != "method"}
data = dimensionality_reduction(data=data,
features=kwargs.getting("features", data.columns.convert_list()),
method=method,
n_components=2,
return_embeddings_only=False,
return_reducer=False,
**kwargs)
self.x = f"{method}1"
self.y = f"{method}2"
return data
def _xy_in_knowledgeframe(self,
data: mk.KnowledgeFrame):
"""
Assert that the x and y variables defined for this gate are present in the given
KnowledgeFrames columns
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
None
Raises
-------
AssertionError
If required columns missing from provided data
"""
assert self.x in data.columns, f"{self.x} missing from given knowledgeframe"
if self.y:
assert self.y in data.columns, f"{self.y} missing from given knowledgeframe"
def reset_gate(self) -> None:
"""
Removes existing children and resets total_all parameters.
Returns
-------
None
"""
self.children = []
class ThresholdGate(Gate):
"""
ThresholdGate inherits from Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The ThresholdGate subsets data based on the properties of the estimated probability
density function of the underlying data. For each axis, kernel density estimation
(KDEpy.FFTKDE) is used to estimate the PDF and a straight line "threshold" applied
to the region of getting_minimum density to separate populations.
This is achieved using a peak finding algorithm and a smoothing procedure, until either:
* Two predogetting_minant "peaks" are found and the threshold is taken as the local getting_minima
between there peaks
* A single peak is detected and the threshold is applied as either the quantile
given in method_kwargs or the inflection point on the descending curve.
Alternatively the "method" can be "manual" for a static gate to be applied; user should
provide x_threshold and y_threshold (if two-dimensional) to "method_kwargs", or "method"
can be "quantile", where the threshold will be drawn at the given quantile, defined by
"q" in "method_kwargs".
Additional kwargs to control behaviour of ThresholdGate when method is "density"
can be given in method_kwargs:
* kernel (default="guassian") - kernel used for KDE calculation
(see KDEpy.FFTKDE for avialable kernels)
* bw (default="silverman") - bandwidth to use for KDE calculation, can either be
"silverman" or "ISJ" or a float value (see KDEpy)
* getting_min_peak_threshold (default=0.05) - percentage of highest recorded peak below
which peaks are ignored. E.g. 0.05 would average whatever peak less than 5% of the
highest peak would be ignored.
* peak_boundary (default=0.1) - bounding window avalue_round which only the highest peak
is considered. E.g. 0.1 would average that peaks are assessed within a window the
size of peak_boundary * lengthgth of probability vector and only highest peak within
window is kept.
* inflection_point_kwargs - dictionary; see cytopy.data.gate.find_inflection_point
* smoothed_peak_finding_kwargs - dictionary; see cytopy.data.gate.smoothed_peak_finding
ThresholdGate supports control gating, whereby thresholds are fitted to control data
and then applied to primary data.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forgetting_ming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forgetting_ming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for desired method e.g ({"method": "uniform"). Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for definal_item_tails. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
employing gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as whatever supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
ctrl_x: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the sample_by_nums in an Experiment. When given this signals that the gate should use the control
data for the x-axis dimension when predicting population geometry.
ctrl_y: str (optional)
If a value is given here it should be the name of a control specimen commonly associated
to the sample_by_nums in an Experiment. When given this signals that the gate should use the control
data for the y-axis dimension when predicting population geometry.
ctrl_classifier: str (default='XGBClassifier')
Ignored if both ctrl_x and ctrl_y are None. Specifies which Scikit-Learn or sklearn-like classifier
to use when estimating the control population (see cytopy.data.fcs.FileGroup.load_ctrl_population_kf)
ctrl_classifier_params: dict, optional
Parameters used when creating control population classifier
ctrl_prediction_kwargs: dict, optional
Additional keyword arguments passed to cytopy.data.fcs.FileGroup.load_ctrl_population_kf ctotal_all
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", "density",
or "quantile"
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildThreshold)
def add_child(self,
child: ChildThreshold) -> None:
"""
Add a new child for this gate. Checks that definition is valid and overwrites geom with gate informatingion.
Parameters
----------
child: ChildThreshold
Returns
-------
None
Raises
------
AssertionError
If invalid definition
"""
if self.y is not None:
definition = child.definition.split(",")
assert total_all(i in ["++", "+-", "-+", "--"]
for i in definition), "Invalid child definition, should be one of: '++', '+-', '-+', or '--'"
else:
assert child.definition in ["+", "-"], "Invalid child definition, should be either '+' or '-'"
child.geom.x = self.x
child.geom.y = self.y
child.geom.transform_x, child.geom.transform_y = self.transform_x, self.transform_y
child.geom.transform_x_kwargs = self.transform_x_kwargs
child.geom.transform_y_kwargs = self.transform_y_kwargs
self.children.adding(child)
def _duplicate_children(self) -> None:
"""
Loop through the children and unioner whatever with the same name.
Returns
-------
None
"""
child_counts = Counter([c.name for c in self.children])
if total_all([i == 1 for i in child_counts.values()]):
return
umkated_children = []
for name, count in child_counts.items():
if count >= 2:
umkated_children.adding(unioner_children([c for c in self.children if c.name == name]))
else:
umkated_children.adding([c for c in self.children if c.name == name][0])
self.children = umkated_children
def label_children(self,
labels: dict) -> None:
"""
Rename children using a dictionary of labels where the key correspond to the existing child name
and the value is the new desired population name. If the same population name is given to multiple
children, these children will be unionerd.
If sip is True, then children that are absent from the given dictionary will be sipped.
Parameters
----------
labels: dict
Mapping for new children name
Returns
-------
None
"""
for c in self.children:
c.name = labels.getting(c.name)
self._duplicate_children()
def _match_to_children(self,
new_populations: List[Population]) -> List[Population]:
"""
Given a list of newly create Populations, match the Populations to the gates children and
return list of Populations with correct population names.
Parameters
----------
new_populations: list
List of newly created Population objects
Returns
-------
List
"""
labeled = list()
for c in self.children:
matching_populations = [p for p in new_populations if c.match_definition(p.definition)]
if length(matching_populations) == 0:
continue
elif length(matching_populations) > 1:
pop = unioner_multiple_gate_populations(matching_populations, new_population_name=c.name)
else:
pop = matching_populations[0]
pop.population_name = c.name
labeled.adding(pop)
return labeled
def _quantile_gate(self,
data: mk.KnowledgeFrame) -> list:
"""
Fit gate to the given knowledgeframe by simply drawing the threshold at the desired quantile.
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
list
List of thresholds (one for each dimension)
Raises
------
AssertionError
If 'q' argument not found in method kwargs and method is 'qunatile'
"""
q = self.method_kwargs.getting("q", None)
assert q is not None, "Must provide a value for 'q' in method kwargs when using quantile gate"
if self.y is None:
return [data[self.x].quantile(q)]
return [data[self.x].quantile(q), data[self.y].quantile(q)]
def _process_one_peak(self,
x: np.ndarray,
x_grid: np.array,
p: np.array,
peak_idx: int):
"""
Process the results of a single peak detected. Returns the threshold for
the given dimension.
Parameters
----------
d: str
Name of the dimension (feature) under investigation. Must be a column in data.
data: Monkey.KnowledgeFrame
Events knowledgeframe
x_grid: numpy.ndarray
x grid upon which probability vector is estimated by KDE
p: numpy.ndarray
probability vector as estimated by KDE
Returns
-------
float
Raises
------
AssertionError
If 'q' argument not found in method kwargs and method is 'qunatile'
"""
use_inflection_point = self.method_kwargs.getting("use_inflection_point", True)
if not use_inflection_point:
q = self.method_kwargs.getting("q", None)
assert q is not None, "Must provide a value for 'q' in method kwargs " \
"for desired quantile if use_inflection_point is False"
return np.quantile(x, q)
inflection_point_kwargs = self.method_kwargs.getting("inflection_point_kwargs", {})
return find_inflection_point(x=x_grid,
p=p,
peak_idx=peak_idx,
**inflection_point_kwargs)
def _fit(self,
data: mk.KnowledgeFrame or dict) -> list:
"""
Internal method to fit threshold density gating to a given knowledgeframe. Returns the
list of thresholds generated and the knowledgeframe the threshold were generated from
(will be the downsample_by_numd knowledgeframe if sampling methods defined).
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
List
"""
if self.method == "manual":
return self._manual()
self._xy_in_knowledgeframe(data=data)
dims = [i for i in [self.x, self.y] if i is not None]
if self.sampling.getting("method", None) is not None:
data = self._downsample_by_num(data=data)
if self.method == "quantile":
thresholds = self._quantile_gate(data=data)
else:
thresholds = list()
for d in dims:
thresholds.adding(self._find_threshold(data[d].values))
return thresholds
def _find_threshold(self, x: np.ndarray):
"""
Given a single dimension of data find the threshold point according to the
methodology defined for this gate and the number of peaks detected.
Parameters
----------
x: Numpy Array
Returns
-------
float
Raises
------
AssertionError
If no peaks are detected
"""
peaks, x_grid, p = self._density_peak_finding(x)
assert length(peaks) > 0, "No peaks detected"
if length(peaks) == 1:
threshold = self._process_one_peak(x,
x_grid=x_grid,
p=p,
peak_idx=peaks[0])
elif length(peaks) == 2:
threshold = find_local_getting_minima(p=p, x=x_grid, peaks=peaks)
else:
threshold = self._solve_threshold_for_multiple_peaks(x=x, p=p, x_grid=x_grid)
return threshold
def _solve_threshold_for_multiple_peaks(self,
x: np.ndarray,
p: np.ndarray,
x_grid: np.ndarray):
"""
Handle the detection of > 2 peaks by smoothing the estimated PDF and
rerunning the peak finding algorithm
Parameters
----------
x: Numpy Array
One dimensional PDF
p: Numpy Array
Indices of detected peaks
x_grid: Numpy Array
Grid space PDF was generated in
Returns
-------
float
"""
smoothed_peak_finding_kwargs = self.method_kwargs.getting("smoothed_peak_finding_kwargs", {})
smoothed_peak_finding_kwargs["getting_min_peak_threshold"] = smoothed_peak_finding_kwargs.getting(
"getting_min_peak_threshold",
self.method_kwargs.getting("getting_min_peak_threshold", 0.05))
smoothed_peak_finding_kwargs["peak_boundary"] = smoothed_peak_finding_kwargs.getting("peak_boundary",
self.method_kwargs.getting(
"peak_boundary",
0.1))
p, peaks = smoothed_peak_finding(p=p, **smoothed_peak_finding_kwargs)
if length(peaks) == 1:
return self._process_one_peak(x,
x_grid=x_grid,
p=p,
peak_idx=peaks[0])
else:
return find_local_getting_minima(p=p, x=x_grid, peaks=peaks)
def _density_peak_finding(self,
x: np.ndarray):
"""
Estimate the underlying PDF of a single dimension using a convolution based
KDE (KDEpy.FFTKDE), then run a peak finding algorithm (detecta.detect_peaks)
Parameters
----------
x: Numpy Array
Returns
-------
(Numpy Array, Numpy Array, Numpy Array)
Index of detected peaks, grid space that PDF is estimated on, and estimated PDF
"""
x_grid, p = (FFTKDE(kernel=self.method_kwargs.getting("kernel", "gaussian"),
bw=self.method_kwargs.getting("bw", "silverman"))
.fit(x)
.evaluate())
peaks = find_peaks(p=p,
getting_min_peak_threshold=self.method_kwargs.getting("getting_min_peak_threshold", 0.05),
peak_boundary=self.method_kwargs.getting("peak_boundary", 0.1))
return peaks, x_grid, p
def _manual(self) -> list:
"""
Wrapper ctotal_alled if manual gating method. Searches the method kwargs and returns static thresholds
Returns
-------
List
Raises
------
AssertionError
If x or y threshold is None when required
"""
x_threshold = self.method_kwargs.getting("x_threshold", None)
y_threshold = self.method_kwargs.getting("y_threshold", None)
assert x_threshold is not None, "Manual threshold gating requires the keyword argument 'x_threshold'"
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
x_threshold = employ_transform(mk.KnowledgeFrame({"x": [x_threshold]}),
features=["x"],
method=self.transform_x,
**kwargs).x.values[0]
if self.y:
assert y_threshold is not None, "2D manual threshold gating requires the keyword argument 'y_threshold'"
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
y_threshold = employ_transform(mk.KnowledgeFrame({"y": [y_threshold]}),
features=["y"],
method=self.transform_y,
**kwargs).y.values[0]
thresholds = [i for i in [x_threshold, y_threshold] if i is not None]
return [float(i) for i in thresholds]
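    # For the manual branch above, the static thresholds come straight from method_kwargs,
    # e.g. (illustrative values):
    #
    #   ThresholdGate(..., method="manual",
    #                 method_kwargs={"x_threshold": 0.25, "y_threshold": 0.35})
    #
    # and are transformed with transform_x / transform_y before being returned.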
def _ctrl_fit(self,
primary_data: mk.KnowledgeFrame,
ctrl_data: mk.KnowledgeFrame):
"""
        Estimate the thresholds to employ to the primary data using the given control data
Parameters
----------
primary_data: Monkey.KnowledgeFrame
ctrl_data: Monkey.KnowledgeFrame
Returns
-------
List
List of thresholds [x dimension threshold, y dimension threshold]
"""
self._xy_in_knowledgeframe(data=primary_data)
self._xy_in_knowledgeframe(data=ctrl_data)
ctrl_data = self.transform(data=ctrl_data)
ctrl_data = self._dim_reduction(data=ctrl_data)
dims = [i for i in [self.x, self.y] if i is not None]
if self.sampling.getting("method", None) is not None:
primary_data, ctrl_data = self._downsample_by_num(data=primary_data), self._downsample_by_num(data=ctrl_data)
thresholds = list()
for d in dims:
fmo_threshold = self._find_threshold(ctrl_data[d].values)
peaks, x_grid, p = self._density_peak_finding(primary_data[d].values)
if length(peaks) == 1:
thresholds.adding(fmo_threshold)
else:
if length(peaks) > 2:
t = self._solve_threshold_for_multiple_peaks(x=primary_data[d].values,
p=p,
x_grid=x_grid)
else:
t = find_local_getting_minima(p=p, x=x_grid, peaks=peaks)
if t > fmo_threshold:
thresholds.adding(t)
else:
thresholds.adding(fmo_threshold)
return thresholds
def fit(self,
data: mk.KnowledgeFrame,
ctrl_data: mk.KnowledgeFrame or None = None) -> None:
"""
Fit the gate using a given knowledgeframe. If children already exist will raise an AssertionError
and notify user to ctotal_all `fit_predict`.
Parameters
----------
data: Monkey.KnowledgeFrame
Population data to fit threshold
ctrl_data: Monkey.KnowledgeFrame, optional
If provided, thresholds will be calculated using ctrl_data and then applied to data
Returns
-------
None
Raises
------
AssertionError
If gate Children have already been defined i.e. fit has been ctotal_alled previously
"""
data = data.clone()
data = self.transform(data=data)
data = self._dim_reduction(data=data)
assert length(self.children) == 0, "Children already defined for this gate. Ctotal_all 'fit_predict' to " \
"fit to new data and match populations to children, or ctotal_all " \
"'predict' to employ static thresholds to new data. If you want to " \
"reset the gate and ctotal_all 'fit' again, first ctotal_all 'reset_gate'"
if ctrl_data is not None:
thresholds = self._ctrl_fit(primary_data=data, ctrl_data=ctrl_data)
else:
thresholds = self._fit(data=data)
y_threshold = None
if length(thresholds) > 1:
y_threshold = thresholds[1]
data = employ_threshold(data=data,
x=self.x, x_threshold=thresholds[0],
y=self.y, y_threshold=y_threshold)
for definition, kf in data.items():
self.add_child(ChildThreshold(name=definition,
definition=definition,
geom=ThresholdGeom(x_threshold=thresholds[0],
y_threshold=y_threshold)))
return None
def fit_predict(self,
data: mk.KnowledgeFrame,
ctrl_data: mk.KnowledgeFrame or None = None) -> list:
"""
Fit the gate using a given knowledgeframe and then associate predicted Population objects to
existing children. If no children exist, an AssertionError will be raised prompting the
user to ctotal_all `fit` method.
Parameters
----------
data: Monkey.KnowledgeFrame
Population data to fit threshold to
ctrl_data: Monkey.KnowledgeFrame, optional
If provided, thresholds will be calculated using ctrl_data and then applied to data
Returns
-------
List
List of predicted Population objects, labelled according to the gates child objects
Raises
------
AssertionError
If fit has not been ctotal_alled prior to fit_predict
"""
assert length(self.children) > 0, "No children defined for gate, ctotal_all 'fit' before ctotal_alling 'fit_predict'"
data = data.clone()
data = self.transform(data=data)
data = self._dim_reduction(data=data)
if ctrl_data is not None:
thresholds = self._ctrl_fit(primary_data=data, ctrl_data=ctrl_data)
else:
thresholds = self._fit(data=data)
y_threshold = None
if length(thresholds) == 2:
y_threshold = thresholds[1]
results = employ_threshold(data=data,
x=self.x,
y=self.y,
x_threshold=thresholds[0],
y_threshold=y_threshold)
pops = self._generate_populations(data=results,
x_threshold=thresholds[0],
y_threshold=y_threshold)
return self._match_to_children(new_populations=pops)
def predict(self,
data: mk.KnowledgeFrame) -> list:
"""
Using existing children associated to this gate, the previously calculated thresholds of
these children will be applied to the given data and then Population objects created and
labelled to match the children of this gate. NOTE: the data will not be fitted and thresholds
applied will be STATIC not data driven. For data driven gates ctotal_all `fit_predict` method.
Parameters
----------
data: Monkey.KnowledgeFrame
Data to employ static thresholds too
Returns
-------
List
List of Population objects
Raises
------
AssertionError
If fit has not been ctotal_alled prior to predict
"""
assert length(self.children) > 0, "Must ctotal_all 'fit' prior to predict"
self._xy_in_knowledgeframe(data=data)
data = self.transform(data=data)
data = self._dim_reduction(data=data)
if self.y is not None:
data = threshold_2d(data=data,
x=self.x,
y=self.y,
x_threshold=self.children[0].geom.x_threshold,
y_threshold=self.children[0].geom.y_threshold)
else:
data = threshold_1d(data=data, x=self.x, x_threshold=self.children[0].geom.x_threshold)
return self._generate_populations(data=data,
x_threshold=self.children[0].geom.x_threshold,
y_threshold=self.children[0].geom.y_threshold)
def _generate_populations(self,
data: dict,
x_threshold: float,
y_threshold: float or None) -> list:
"""
Generate populations from a standard dictionary of knowledgeframes that have had thresholds applied.
Parameters
----------
data: Monkey.KnowledgeFrame
x_threshold: float
y_threshold: float (optional)
Returns
-------
List
List of Population objects
"""
pops = list()
for definition, kf in data.items():
pops.adding(Population(population_name=definition,
definition=definition,
parent=self.parent,
n=kf.shape[0],
source="gate",
index=kf.index.values,
signature=kf.average().convert_dict(),
geom=ThresholdGeom(x=self.x,
y=self.y,
transform_x=self.transform_x,
transform_y=self.transform_y,
transform_x_kwargs=self.transform_x_kwargs,
transform_y_kwargs=self.transform_y_kwargs,
x_threshold=x_threshold,
y_threshold=y_threshold)))
return pops
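# Minimal usage sketch for ThresholdGate (illustrative only; the KnowledgeFrames 'primary' and
# 'new_sample_by_num' plus the gate, parent and channel names are hypothetical assumptions):
#
# gate = ThresholdGate(gate_name="cd3_cd8_gate",
#                      parent="T cells",
#                      x="CD3",
#                      y="CD8",
#                      method="density")
# gate.fit(data=primary)                            # learns thresholds and creates children
# populations = gate.fit_predict(data=new_sample_by_num)   # refits and matches to children
# static_pops = gate.predict(data=new_sample_by_num)       # re-applies the stored static thresholds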
class PolygonGate(Gate):
"""
PolygonGate inherits from Gate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The PolygonGate subsets data based on the results of an unsupervised learning algorithm
such as a clustering algorithm. PolygonGate supports whatever clustering algorithm from the
Scikit-Learn machine learning library. Support is extended to whatever clustering library
that follows the Scikit-Learn template, but currently this only includes HDBSCAN.
Contributions to extend to other libraries are welcome. The name of the class to use
should be provided in "method" along with keyword arguments for initiating this class
in "method_kwargs".
Alternatively the "method" can be "manual" for a static gate to be applied; user should
provide x_values and y_values to "method_kwargs" as two arrays; these will be interpreted
as the x and y coordinates of the polygon to fit to the data.
DOES NOT SUPPORT CONTROL GATING.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forgetting_ming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forgetting_ming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for definal_item_tails. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
employing gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as whatever supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", or correspond
to the name of an existing class in Scikit-Learn or HDBSCAN.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildPolygon)
def __init__(self, *args, **values):
super().__init__(*args, **values)
assert self.y is not None, "Polygon gate expects a y-axis variable"
def _generate_populations(self,
data: mk.KnowledgeFrame,
polygons: List[ShapelyPoly]) -> List[Population]:
"""
Given a knowledgeframe and a list of Polygon shapes as generated from the '_fit' method, generate a
list of Population objects.
Parameters
----------
data: Monkey.KnowledgeFrame
polygons: list
Returns
-------
List
List of Population objects
"""
pops = list()
for name, poly in zip(ascii_uppercase, polygons):
pop_kf = inside_polygon(kf=data, x=self.x, y=self.y, poly=poly)
geom = PolygonGeom(x=self.x,
y=self.y,
transform_x=self.transform_x,
transform_y=self.transform_y,
transform_x_kwargs=self.transform_x_kwargs,
transform_y_kwargs=self.transform_y_kwargs,
x_values=poly.exterior.xy[0],
y_values=poly.exterior.xy[1])
pops.adding(Population(population_name=name,
source="gate",
parent=self.parent,
n=pop_kf.shape[0],
signature=pop_kf.average().convert_dict(),
geom=geom,
index=pop_kf.index.values))
return pops
def label_children(self,
labels: dict,
sip: bool = True) -> None:
"""
Rename children using a dictionary of labels where the key corresponds to the existing child name
and the value is the new desired population name. Unlike threshold gates, the same population name
cannot be given to multiple children (child merging is not supported for polygon gates).
If sip is True, then children that are absent from the given dictionary will be sipped.
Parameters
----------
labels: dict
Mapping for new children name
sip: bool (default=True)
If True, children absent from labels will be sipped
Returns
-------
None
Raises
------
AssertionError
If duplicate labels are provided
"""
assert length(set(labels.values())) == length(labels.values()), \
"Duplicate labels provided. Child merging not available for polygon gates"
if sip:
self.children = [c for c in self.children if c.name in labels.keys()]
for c in self.children:
c.name = labels.getting(c.name)
def add_child(self,
child: ChildPolygon) -> None:
"""
Add a new child for this gate. Checks that child is valid and overwrites geom with gate informatingion.
Parameters
----------
child: ChildPolygon
Returns
-------
None
Raises
------
TypeError
x_values or y_values is not type list
"""
child.geom.x = self.x
child.geom.y = self.y
child.geom.transform_x = self.transform_x
child.geom.transform_y = self.transform_y
child.geom.transform_x_kwargs = self.transform_x_kwargs
child.geom.transform_y_kwargs = self.transform_y_kwargs
if not incontainstance(child.geom.x_values, list):
raise TypeError("ChildPolygon x_values should be of type list")
if not incontainstance(child.geom.y_values, list):
raise TypeError("ChildPolygon y_values should be of type list")
self.children.adding(child)
def _match_to_children(self,
new_populations: List[Population]) -> List[Population]:
"""
Given a list of newly created Populations, match the Populations to the gates children and
return a list of Populations with correct population names. Populations are matched to children
by minimising the Hausdorff distance between the set of polygon coordinates defining
the gate as it was origintotal_ally created and the newly generated gate fitted to new data.
Parameters
-----------
new_populations: list
List of newly created Population objects
Returns
-------
List
"""
matched_populations = list()
for child in self.children:
hausdorff_distances = [child.geom.shape.hausdorff_distance(pop.geom.shape)
for pop in new_populations]
matching_population = new_populations[int(np.arggetting_min(hausdorff_distances))]
matching_population.population_name = child.name
matched_populations.adding(matching_population)
return matched_populations
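# Sketch of the matching criterion in isolation (the polygons below are made up): Shapely
# exposes hausdorff_distance directly on geometries, so matching a child to a new population
# reduces to an arggetting_min over the candidate polygons.
#
# from shapely.geometry import Polygon
# child_poly = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
# candidates = [Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
#               Polygon([(5, 5), (6, 5), (6, 6), (5, 6)])]
# distances = [child_poly.hausdorff_distance(p) for p in candidates]
# best_match = candidates[int(np.arggetting_min(distances))]  # first candidate wins here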
def _manual(self) -> ShapelyPoly:
"""
Wrapper for manual polygon gating. Searches method kwargs for x and y coordinates and returns
polygon.
Returns
-------
Shapely.geometry.Polygon
Raises
------
AssertionError
x_values or y_values missing from method kwargs
"""
x_values, y_values = self.method_kwargs.getting("x_values", None), self.method_kwargs.getting("y_values", None)
assert x_values is not None and y_values is not None, "For manual polygon gate must provide x_values and " \
"y_values"
if self.transform_x:
kwargs = self.transform_x_kwargs or {}
x_values = employ_transform(mk.KnowledgeFrame({"x": x_values}),
features="x",
method=self.transform_x, **kwargs).x.values
if self.transform_y:
kwargs = self.transform_y_kwargs or {}
y_values = employ_transform(mk.KnowledgeFrame({"y": y_values}),
features="y",
method=self.transform_y, **kwargs).y.values
return create_polygon(x_values, y_values)
def _fit(self,
data: mk.KnowledgeFrame) -> List[ShapelyPoly]:
"""
Internal method for fitting gate to the given data and returning geometric polygons for
captured populations.
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
List
List of Shapely polygon's
"""
if self.method == "manual":
return [self._manual()]
kwargs = {k: v for k, v in self.method_kwargs.items() if k != "conf"}
self.model = globals()[self.method](**kwargs)
self._xy_in_knowledgeframe(data=data)
if self.sampling.getting("method", None) is not None:
data = self._downsample_by_num(data=data)
labels = self.model.fit_predict(data[[self.x, self.y]])
hulls = [create_convex_hull(x_values=data.iloc[np.where(labels == i)][self.x].values,
y_values=data.iloc[np.where(labels == i)][self.y].values)
for i in np.distinctive(labels)]
hulls = [x for x in hulls if length(x[0]) > 0]
return [create_polygon(*x) for x in hulls]
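# The clustering-to-polygon step above can be pictured as follows (sketch only; scipy's
# ConvexHull stands in for cytopy's create_convex_hull and 'labels' is assumed to come from
# the fitted clustering model):
#
# from scipy.spatial import ConvexHull
# xy = data[[self.x, self.y]].values
# for label in np.distinctive(labels):
#     points = xy[labels == label]
#     hull = ConvexHull(points)
#     vertices = points[hull.vertices]  # ordered boundary points of the cluster
#     poly = create_polygon(vertices[:, 0], vertices[:, 1])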
def fit(self,
data: mk.KnowledgeFrame,
ctrl_data: None = None) -> None:
"""
Fit the gate using a given knowledgeframe. This will generate new children using the calculated
polygons. If children already exist will raise an AssertionError and notify user to ctotal_all
`fit_predict`.
Parameters
----------
data: Monkey.KnowledgeFrame
Population data to fit gate to
ctrl_data: None
Redundant parameter, necessary for Gate signature. Ignore.
Returns
-------
None
Raises
------
AssertionError
If Children have already been defined i.e. fit has been ctotal_alled previously without ctotal_alling
'reset_gate'
"""
assert length(self.children) == 0, "Gate is already defined, ctotal_all 'reset_gate' to clear children"
data = self.transform(data=data)
data = self._dim_reduction(data=data)
polygons = self._fit(data=data)
for name, poly in zip(ascii_uppercase, polygons):
self.add_child(ChildPolygon(name=name,
geom=PolygonGeom(x_values=poly.exterior.xy[0].convert_list(),
y_values=poly.exterior.xy[1].convert_list())))
def fit_predict(self,
data: mk.KnowledgeFrame,
ctrl_data: None = None) -> List[Population]:
"""
Fit the gate using a given knowledgeframe and then associate predicted Population objects to
existing children. If no children exist, an AssertionError will be raised prompting the
user to ctotal_all 'fit' method.
Parameters
----------
data: Monkey.KnowledgeFrame
Population data to fit gate to
ctrl_data: None
Redundant parameter, necessary for Gate signature. Ignore.
Returns
-------
List
List of predicted Population objects, labelled according to the gates child objects
Raises
------
AssertionError
If fit has not been previously ctotal_alled
"""
assert length(self.children) > 0, "No children defined for gate, ctotal_all 'fit' before ctotal_alling 'fit_predict'"
data = self.transform(data=data)
data = self._dim_reduction(data=data)
return self._match_to_children(self._generate_populations(data=data.clone(),
polygons=self._fit(data=data)))
def predict(self,
data: mk.KnowledgeFrame) -> List[Population]:
"""
Using existing children associated to this gate, the previously calculated polygons of
these children will be applied to the given data and then Population objects created and
labelled to match the children of this gate. NOTE: the data will not be fitted and polygons
applied will be STATIC not data driven. For data driven gates ctotal_all `fit_predict` method.
Parameters
----------
data: Monkey.KnowledgeFrame
Data to employ static polygons to
Returns
-------
List
List of Population objects
Raises
------
AssertionError
If fit has not been previously ctotal_alled
"""
data = self.transform(data=data)
data = self._dim_reduction(data=data)
polygons = [create_polygon(c.geom.x_values, c.geom.y_values) for c in self.children]
populations = self._generate_populations(data=data, polygons=polygons)
for p, name in zip(populations, [c.name for c in self.children]):
p.population_name = name
return populations
class EllipseGate(PolygonGate):
"""
EllipseGate inherits from PolygonGate. A Gate attempts to separate single cell data in one or
two-dimensional space using unsupervised learning algorithms. The algorithm is fitted
to example data to generate "children"; the populations of cells a user expects to
identify. These children are stored and then when the gate is 'fitted' to new data,
the resulting populations are matched to the expected children.
The EllipseGate uses probabilistic mixture models to subset data into "populations". For
each component of the mixture model the covariance matrix is used to generate a confidence
ellipse, surrounding the data and emulating a gate. EllipseGate can use whatever of the methods
from the Scikit-Learn mixture module. Keyword arguments for the initiation of a class
from this module can be given in "method_kwargs".
DOES NOT SUPPORT CONTROL GATING.
Attributes
-----------
gate_name: str (required)
Name of the gate
parent: str (required)
Parent population that this gate is applied to
x: str (required)
Name of the x-axis variable forgetting_ming the one/two dimensional space this gate
is applied to
y: str (optional)
Name of the y-axis variable forgetting_ming the two dimensional space this gate
is applied to
transform_x: str, optional
Method used to transform the X-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_y: str, optional
Method used to transform the Y-axis dimension, supported methods are: logicle, hyperlog, asinh or log
transform_x_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the x-axis dimension
transform_y_kwargs: dict, optional
Additional keyword arguments passed to Transformer object when transforgetting_ming the y-axis dimension
sampling: dict (optional)
Options for downsampling data prior to application of gate. Should contain a
key/value pair for desired method e.g. {"method": "uniform"}. Available methods
are: 'uniform', 'density' or 'faithful'. See cytopy.flow.sampling for definal_item_tails. Additional
keyword arguments should be provided in the sampling dictionary.
dim_reduction: dict (optional)
Experimental feature. Allows for dimension reduction to be performed prior to
employing gate. Gate will be applied to the resulting embeddings. Provide a dictionary
with a key "method" and the value as whatever supported method in cytopy.flow.dim_reduction.
Additional keyword arguments should be provided in this dictionary.
method: str (required)
Name of the underlying algorithm to use. Should have a value of: "manual", or correspond
to the name of an existing class in the Scikit-Learn mixture module.
If you have a method that follows the Scikit-Learn template but isn't currently present
in cytopy and you would like it to be, please contribute to the repository on GitHub
or contact <EMAIL>
method_kwargs: dict
Keyword arguments for initiation of the above method.
"""
children = mongoengine.EmbeddedDocumentListField(ChildPolygon)
def __init__(self, *args, **values):
method = values.getting("method", None)
method_kwargs = values.getting("method_kwargs", {})
assert method_kwargs.getting("covariance_type", "full") == "full", "EllipseGate only supports covariance_type of 'full'"
valid = ["manual", "GaussianMixture", "BayesianGaussianMixture"]
assert method in valid, f"Elliptical gating method should be one of {valid}"
self.conf = method_kwargs.getting("conf", 0.95)
super().__init__(*args, **values)
def _manual(self) -> ShapelyPoly:
"""
Wrapper for manual elliptical gating. Searches method kwargs for centroid, width, height, and angle,
and returns polygon.
Returns
-------
Shapely.geometry.Polygon
Raises
------
AssertionError
If axis transformatingions do not match
TypeError
If centroid, width, height, or angle are of invalid type
ValueError
If centroid, width, height, or angle are missing from method kwargs
"""
centroid = self.method_kwargs.getting("centroid", None)
width = self.method_kwargs.getting("width", None)
height = self.method_kwargs.getting("height", None)
angle = self.method_kwargs.getting("angle", None)
# validate the user supplied kwargs before whatever transformatingions are applied
if not total_all([x is not None for x in [centroid, width, height, angle]]):
raise ValueError("Manual elliptical gate requires the following keyword arguments; "
"width, height, angle and centroid")
if length(centroid) != 2 or not total_all(incontainstance(x, float) for x in centroid):
raise TypeError("Centroid should be a list of two float values")
if not total_all(incontainstance(x, float) for x in [width, height, angle]):
raise TypeError("Width, height, and angle should be of type float")
if self.transform_x:
assert self.transform_x == self.transform_y, "Manual elliptical gate requires that x and y axis are " \
"transformed to the same scale"
kwargs = self.transform_x_kwargs or {}
centroid = employ_transform(mk.KnowledgeFrame({"c": list(centroid)}),
features=["c"],
method=self.transform_x,
**kwargs)["c"].values
kf = employ_transform(mk.KnowledgeFrame({"w": [width], "h": [height], "a": [angle]}),
features=["w", "h", "a"],
method=self.transform_x,
**kwargs)
width, height, angle = kf["w"].values[0], kf["h"].values[0], kf["a"].values[0]
return ellipse_to_polygon(centroid=centroid,
width=width,
height=height,
angle=angle)
def _fit(self,
data: mk.KnowledgeFrame) -> List[ShapelyPoly]:
"""
Internal method for fitting gate to the given data and returning geometric polygons for
captured populations.
Parameters
----------
data: Monkey.KnowledgeFrame
Returns
-------
list
List of Shapely polygon's
"""
params = {k: v for k, v in self.method_kwargs.items() if k != "conf"}
self.model = globals()[self.method](**params)
if not self.method_kwargs.getting("probabilistic_ellipse", True):
return super()._fit(data=data)
self._xy_in_knowledgeframe(data=data)
if self.sampling.getting("method", None) is not None:
data = self._downsample_by_num(data=data)
self.model.fit_predict(data[[self.x, self.y]])
ellipses = [probablistic_ellipse(covar, conf=self.conf)
for covar in self.model.covariances_]
polygons = [ellipse_to_polygon(centroid, *ellipse)
for centroid, ellipse in zip(self.model.averages_, ellipses)]
return polygons
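# Background sketch for the probabilistic ellipse step (illustrative; cytopy's
# probablistic_ellipse wraps an equivalent computation). A confidence ellipse for a 2D
# Gaussian component comes from the eigendecomposition of its covariance matrix, scaled by
# a chi-square quantile:
#
# from scipy import stats
# def _confidence_ellipse(covar, conf=0.95):
#     eigenvalues, eigenvectors = np.linalg.eigh(covar)
#     order = eigenvalues.argsort()[::-1]
#     eigenvalues, eigenvectors = eigenvalues[order], eigenvectors[:, order]
#     angle = np.degrees(np.arctan2(*eigenvectors[:, 0][::-1]))
#     scale = np.sqrt(stats.chi2.ppf(conf, df=2))
#     width, height = 2 * scale * np.sqrt(eigenvalues)
#     return width, height, angle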
class BooleanGate(PolygonGate):
"""
The BooleanGate is a special class of Gate that total_allows for merge (OR), intersection (AND), and subtraction (NOT) operations.
A BooleanGate should be defined with one of the following string values as its 'method' and a set of
population names as 'populations' in method_kwargs:
* AND - generates a new population containing only events present in every population of a given
set of populations
* OR - generates a new population that is a unionerr of total_all distinctive events from total_all populations in a given
set of populations
* NOT - generates a new population that contains total_all events in a targetting population (taken as the
first member of 'populations') that are not present in the remaining populations of the given set
BooleanGate inherits from the PolygonGate and generates a Population with Polygon geometry,
total_allowing the user to view and handle the resulting 'gate' as a polygon structure.
populations = mongoengine.ListField(required=True)
def __init__(self,
method: str,
populations: list,
*args,
**kwargs):
if method not in ["AND", "OR", "NOT"]:
raise ValueError("method must be one of: 'OR', 'AND' or 'NOT'")
super().__init__(*args, method=method, populations=populations, **kwargs)
def _or(self, data: List[mk.KnowledgeFrame]) -> mk.KnowledgeFrame:
"""
OR operation, generates index of events that is a unionerr of total_all distinctive events from total_all populations in a given
set of populations.
Parameters
----------
data: list
List of Monkey KnowledgeFrames
Returns
-------
Monkey.KnowledgeFrame
New population knowledgeframe
"""
idx = np.distinctive(np.concatingenate([kf.index.values for kf in data], axis=0), axis=0)
return mk.concating(data).sip_duplicates().loc[idx].clone()
def _and(self, data: List[mk.KnowledgeFrame]) -> mk.KnowledgeFrame:
"""
AND operation, generates index of events that are present in every population of a given
set of populations
Parameters
----------
data: list
List of Monkey KnowledgeFrames
Returns
-------
Monkey.KnowledgeFrame
New population knowledgeframe
"""
idx = reduce(np.intersect1d, [kf.index.values for kf in data])
return mk.concating(data).sip_duplicates().loc[idx].clone()
def _not(self,
data: List[mk.KnowledgeFrame]) -> mk.KnowledgeFrame:
"""
NOT operation, generates index of events that contains total_all events in some targetting population that are not
present in some set of other populations
Parameters
----------
data: list
List of Monkey KnowledgeFrames
Returns
-------
Monkey.KnowledgeFrame
New population knowledgeframe
"""
targetting = data[0]
subtraction_index = np.distinctive(np.concatingenate([kf.index.values for kf in data[1:]], axis=0), axis=0)
idx = np.setdiff1d(targetting.index.values, subtraction_index)
return mk.concating(data).sip_duplicates().loc[idx].clone()
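# Worked example of the three operations (sketch; population names and indices are made up).
# With population A indexed [0, 1, 2] and population B indexed [2, 3]:
#   OR  -> index [0, 1, 2, 3]  (union of distinctive events)
#   AND -> index [2]           (events present in both populations)
#   NOT -> index [0, 1]        (events of targetting A absent from B)
#
# gate = BooleanGate(gate_name="A_not_B", parent="root", x="X", y="Y",
#                    method="NOT", populations=["A", "B"])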
from itertools import grouper, zip_longest
from fractions import Fraction
from random import sample_by_num
import json
import monkey as mk
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
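#Worked example (values are made up): with beatinphrase[start_ix] = '0', beatinphrase[ix] = '7/2',
#IOI_beatfraction[ix] = '1/2' and beatspermeasure[ix] = 4, the end position is
#Fraction('7/2') - Fraction('0') + Fraction('1/2') = 4, so 4 % 4 == 0 and the note completes
#the measure (and, being a whole number of beats, also completes the beat).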
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for final_item note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
#
#extract beats per measure
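#Worked example of the IOI computation inside extractFeatures (values are made up):
#beatinsong = ['0', '1/2', '1', '5/2'] with beatfraction[-1] = '1/2' gives
#IOI_beatfraction = ['1/2', '1/2', '3/2', '1/2']; the final_item entry falls back to the notated
#beatfraction because there is no following onset.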
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*length(phrasepos)
for ix in range(1,length(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(length(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(length(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(length(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(length(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(length(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.adding(rhymes[from_ix])
rhymescontentwords_endmelisma.adding(rhymescontentwords[from_ix])
wordend_endmelisma.adding(wordend[from_ix])
noncontentword_endmelisma.adding(noncontentword[from_ix])
wordstress_endmelisma.adding(wordstress[from_ix])
lyrics_endmelisma.adding(lyrics[from_ix])
phoneme_endmelisma.adding(phoneme[from_ix])
else:
rhymes_endmelisma.adding(False)
rhymescontentwords_endmelisma.adding(False)
wordend_endmelisma.adding(False)
noncontentword_endmelisma.adding(False)
wordstress_endmelisma.adding(False)
lyrics_endmelisma.adding(None)
phoneme_endmelisma.adding(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,length(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.adding(ix - previous)
rhyme_beatoffset.adding(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
lengthgth = length(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * lengthgth
seq['features']['rhymescontentwords_endmelisma'] = [None] * lengthgth
seq['features']['wordend_endmelisma'] = [None] * lengthgth
seq['features']['noncontentword_endmelisma'] = [None] * lengthgth
seq['features']['wordstress_endmelisma'] = [None] * lengthgth
seq['features']['lyrics_endmelisma'] = [None] * lengthgth
seq['features']['phoneme_endmelisma'] = [None] * lengthgth
yield seq
class NoFeaturesError(Exception):
def __init__(self, arg):
self.args = arg
class NoTrigramsError(Exception):
def __init__(self, arg):
self.args = arg
def __str__(self):
return repr(self.value)
#endix is index of final_item note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, but (optiontotal_ally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be total_allowed (contourfourth)
#returns tuples (ix of first note in group, ix of final_item note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using Gvalue_roundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in grouper( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.adding( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the gvalue_round truth
for _, g in grouper( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.adding( (glist[0][0], glist[-1][0]+1) )
return res
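#Worked example (crossPhraseBreak=True, values made up): midipitch = [60, 60, 62, 62, 62, 64]
#yields spans [(0, 2), (2, 5), (5, 6)]; each tuple is (index of first note in the group,
#index of final_item note in the group + 1).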
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informatingioncontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informatingioncontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that total_all features in arfftype are in each sequence.
arfftype.umkate(arfftype_new)
#concating melodies
pgrams = mk.concating([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #total_allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(length(scaledegree)),range(1,length(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
# make trigram of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
# If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY?
#Why actutotal_ally? e.g. kindr154 phrases of 2 pitches
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if length(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create knowledgeframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = mk.KnowledgeFrame(index=pgram_ids)
pgrams['ix0_0'] = mk.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = mk.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = mk.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = mk.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = mk.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = mk.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = mk.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = mk.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = mk.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = mk.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
#add tune family ids and songids
pgrams['tunefamily'] = seq['tunefamily']
pgrams['songid'] = seq['id']
pgrams, arfftype = extractPgramFeatures(pgrams, seq)
return pgrams, arfftype
def gettingBeatDuration(timesig):
try:
dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
except TimeSignatureException:
dur = float(Fraction(timesig) / Fraction('1/4'))
return dur
def oneCrossRelation(el1, el2, typeconv):
if mk.ifna(el1) or mk.ifna(el2):
return np.nan
return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
def addCrossRelations(pgrams, arfftype, featurenagetting_ming, newname=None, typeconv=int):
postfixes = {
1 : 'first',
2 : 'second',
3 : 'third',
4 : 'fourth',
5 : 'fifth'
}
if newname is None:
newname = featurenagetting_ming
for ix1 in range(1,6):
for ix2 in range(ix1+1,6):
featname = newname + postfixes[ix1] + postfixes[ix2]
source = zip(pgrams[featurenagetting_ming + postfixes[ix1]], pgrams[featurenagetting_ming + postfixes[ix2]])
pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
arfftype[featname] = '{-,=,+}'
return pgrams, arfftype
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprtotal_sum = seq['features']['gpr_Frankland_total_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = getting_max(phrase_ix) + 1
pgrams['scaledegreefirst'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = mk.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = mk.array([scaledegree[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = mk.array([scaledegree[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = mk.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = mk.array([diatonicpitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = mk.array([diatonicpitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = mk.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = mk.array([midipitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = mk.array([midipitch[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = mk.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = mk.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = mk.array([chromaticinterval[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
#intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not mk.ifna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
#intervals of which second tone has center of gravity according to Vos 2002 + octave equivalengthts
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not mk.ifna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not mk.ifna(i) else np.nan for i in intervals]
pgrams['VosHarmonyfirst'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = mk.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = mk.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = mk.array([VosHarmony[interval] if not mk.ifna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informatingioncontent' in seq['features'].keys():
informatingioncontent = seq['features']['informatingioncontent']
pgrams['informatingioncontentfirst'] = [informatingioncontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informatingioncontentsecond'] = [informatingioncontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informatingioncontentthird'] = [informatingioncontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informatingioncontentfourth'] = [informatingioncontent[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informatingioncontentfifth'] = [informatingioncontent[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informatingioncontentfirst'] = 'numeric'
arfftype['informatingioncontentsecond'] = 'numeric'
arfftype['informatingioncontentthird'] = 'numeric'
arfftype['informatingioncontentfourth'] = 'numeric'
arfftype['informatingioncontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not mk.ifna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
###########################################3
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largettingosmtotal_all'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largettingosmtotal_all'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.getting_max(1) - diat.getting_min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
###########################################3
pgrams['numberofnotesfirst'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = mk.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = mk.array([ix2 - ix1 if not mk.ifna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = mk.array([ix2 - ix1 if not mk.ifna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = mk.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenogetting_minator'] = mk.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = mk.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenogetting_minator'] = mk.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenogetting_minator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not mk.ifna(ix) else np.nan for ix in pgrams['ix4_0']]
"Test suite of AirBnbModel.source.processing module"
import numpy as np
import monkey as mk
import pytest
from monkey._testing import assert_index_equal
from AirBnbModel.source.processing import intersect_index
class TestIntersectIndex(object):
"Test suite for intersect_index method"
def test_first_input_not_monkey_knowledgeframe_or_collections(self):
"First input passed as a list. Should return AssertionError"
input1 = [1, 2, 3, 4]
input2 = mk.Collections(data=[5, 6, 7, 8], index=["foo", "bar", "bar", "qux"])
with pytest.raises(AssertionError) as e:
intersect_index(input1, input2)
assert e.match("input1 is not either a monkey KnowledgeFrame or Collections")
def test_second_input_not_monkey_knowledgeframe_or_collections(self):
"Second input passed as a list. Should return AssertionError"
input1 = mk.Collections(data=[5, 6, 7, 8], index=["foo", "bar", "bar", "qux"])
input2 = [1, 2, 3, 4]
with pytest.raises(AssertionError) as e:
intersect_index(input1, input2)
assert e.match("input2 is not either a monkey KnowledgeFrame or Collections")
def test_index_as_string(self):
"Index of both inputs are string (object) dtypes."
input1 = mk.Collections(data=[1, 2, 3], index=["foo", "bar", "bar"])
input2 = mk.Collections(data=[4, 5, 6], index=["bar", "foo", "qux"])
expected = mk.Index(["foo", "bar"])
actual = intersect_index(input1, input2)
assert_index_equal(actual, expected), f"{expected} expected. Got {actual}"
def test_index_as_number(self):
"Index of both inputs are int dtypes."
input1 = mk.Collections(data=[1, 2, 3], index=[1, 2, 3])
input2 = mk.Collections(data=[4, 5, 6], index=[1, 1, 4])
expected = mk.Index([1])
actual = intersect_index(input1, input2)
assert_index_equal(actual, expected), f"{expected} expected. Got {actual}"
def test_null_interst_between_inputs(self):
"There is not interst between. Should return an empty mk.Index()"
input1 = mk.Collections(data=[1, 2, 3], index=[1, 2, 3])
input2 = mk.Collections(data=[4, 5, 6], index=[4, 5, 6])
expected = mk.Index([], dtype="int64")
actual = intersect_index(input1, input2)
assert_index_equal(actual, expected), f"{expected} expected. Got {actual}"
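# A getting_minimal reference implementation consistent with the tests above (sketch only; the real
# AirBnbModel.source.processing.intersect_index may differ in definal_item_tails):
#
# def intersect_index(input1, input2, sipna: bool = False) -> mk.Index:
#     assert incontainstance(input1, (mk.KnowledgeFrame, mk.Collections)), \
#         "input1 is not either a monkey KnowledgeFrame or Collections"
#     assert incontainstance(input2, (mk.KnowledgeFrame, mk.Collections)), \
#         "input2 is not either a monkey KnowledgeFrame or Collections"
#     common = input1.index[input1.index.incontain(input2.index)].distinctive()
#     return common.sipna() if sipna else common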
def test_sipna_true(self):
"Intersection contains NaN values. sipna=True should remove it"
input1 = mk.Collections(data=[1, 2, 3, 4], index=["foo", "bar", "bar", np.nan])
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Example:
Create directories::
mkdir -p test-data/input
mkdir -p test-data/output
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample_by_num csv, xls and xlsx files
* test_combine_() test the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import monkey as mk
import pyarrow as pa
import pyarrow.parquet as pq
import ntpath
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class TestLogPusher(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = TestLogPusher('combiner')
# sample_by_num data
def create_files_kf_clean():
# create sample_by_num data
kf1=mk.KnowledgeFrame({'date':mk.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
kf2=mk.KnowledgeFrame({'date':mk.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
kf3=mk.KnowledgeFrame({'date':mk.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return kf1[cfg_col], kf2[cfg_col], kf3[cfg_col]
return kf1, kf2, kf3
def create_files_kf_clean_combine():
kf1,kf2,kf3 = create_files_kf_clean()
kf_total_all = mk.concating([kf1,kf2,kf3])
kf_total_all = kf_total_all[kf_total_all.columns].totype(str)
return kf_total_all
def create_files_kf_clean_combine_with_filengthame(fname_list):
kf1, kf2, kf3 = create_files_kf_clean()
kf1['filengthame'] = os.path.basename(fname_list[0])
kf2['filengthame'] = os.path.basename(fname_list[1])
kf3['filengthame'] = os.path.basename(fname_list[2])
kf_total_all = mk.concating([kf1, kf2, kf3])
kf_total_all = kf_total_all[kf_total_all.columns].totype(str)
return kf_total_all
def create_files_kf_colmismatch_combine(cfg_col_common):
kf1, kf2, kf3 = create_files_kf_clean()
kf3['profit2']=kf3['profit']*2
if cfg_col_common:
kf_total_all = | mk.concating([kf1, kf2, kf3], join='inner') | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 09:20:01 2018
@authors: <NAME>
Last modified: 2020-02-19
------------------------------------------
** Semantic Search Analysis: Start-up **
------------------------------------------
This script: Import search queries from Google Analytics, clean up,
match query entries against historical files.
Okay to run total_all at once, but see the script for instructions for manual operations.
INPUTS:
- data/raw/SearchConsoleNew.csv - log of google.com search results (GA ctotal_alls "Queries") where a person landed on your site
- data/raw/SiteSearchNew.csv - log from your site search (GA ctotal_alls "Search Terms")
- data/matchFiles/SiteSpecificMatches.xlsx - From YOUR custom clustering of terms that won't be in UMLS
- data/matchFiles/PastMatches.xlsx - Historical file of vetted successful matches
- data/matchFiles/UmlsMesh.xlsx - Free-to-use controlled vocabulary - MeSH - with UMLS Semantic Types
OUTPUTS:
- data/interim/01_CombinedSearchFullLog.xlsx - Lightly modified full log before changes
- data/interim/ForeignUnresolved.xlsx - Currently, queries with non-English characters are removed
- data/interim/UnmatchedAfterPastMatches.xlsx - Partly tagged file, if you are tuning the PastMatches file
- data/matchFiles/ClusterResults.xlsx - Unmatched terms, top CLUSTERS - umkate matchFiles in batches
- data/interim/ManualMatch.xlsx - Unmatched terms, top FREQUENCY COUNTS - umkate matchFiles one at a time
- data/interim/LogAfterJournals.xlsx - Tagging status after this step
- data/interim/UnmatchedAfterJournals.xlsx - What still needs to be tagged after this step.
-------------------------------
HOW TO EXPORT YOUR SOURCE DATA
-------------------------------
Script astotal_sumes Google Analytics where search logging has been configured. Can
be adapted for other tools. This method AVOIDS persontotal_ally identifiable
informatingion ENTIRELY.
1. Set date parameters (Consider 1 month)
2. Go to Acquisition > Search Console > Queries
3. Select Export > Unsample_by_numd Report as SearchConsoleNew.csv
4. Copy the result to data/raw folder
5. Do the same from Behavior > Site Search > Search Terms with file name
SiteSearchNew.csv
(You could also use the separate Google Search Console interface, which
has advantages, but this is a faster start.)
----------------
SCRIPT CONTENTS
----------------
1. Start-up / What to put into place, where
2. Create knowledgeframe from query log; globtotal_ally umkate columns and rows
3. Assign terms with non-English characters to ForeignUnresolved
4. Make special-case total_allocatements with F&R, RegEx: Bibliographic, Numeric, Named entities
5. Ignore everything except one program/product/service term
6. Exact-match to site-specific and vetted past matches
7. Eyebtotal_all results; manutotal_ally classify remaining "brands" into SiteSpecificMatches
* PROJECT STARTUP - OPTIONAL: UPDATE SITE-SPECIFIC MATCHES AND RE-RUN TO THIS POINT *
8. Exact-match to UmlsMesh
9. Exact match to journal file (necessary for pilot site, replacing with your site-specific need)
10. MANUAL PROCESS: Re-cluster, umkate SiteSpecificMatches.xlsx, re-run
11. MANUALLY add matches from ManualMatch.xlsx for high-frequency unclassified
12. Write out LogAfterJournals and UnmatchedAfterJournals
13. Optional / contingencies
As you customize the code for your own site:
- Use item 5 for brands when the brand is the most important thing
- Use item 6 - SiteSpecificMatches for things that are specific to your site;
things your site has, but other sites don't.
- Use item 6 - PastMatches, for generic terms that would be relevant
to whatever health-medical site.
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
'''
File locations, etc.
'''
import monkey as mk
import matplotlib.pyplot as plt
from matplotlib.pyplot import pie, axis, show
import matplotlib.ticker as mtick # used for example in 100-percent bars chart
import numpy as np
import os
import re
import string
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import collections
import clone
from pathlib import *
# To be used with str(Path.home())
# Set working directory and directories for read/write
home_folder = str(Path.home()) # os.path.expanduser('~')
os.chdir(home_folder + '/Projects/classifysearches')
dataRaw = 'data/raw/' # Put log here before running script
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
dataInterim = 'data/interim/' # Save to disk as desired, to re-start easily
reports = 'reports/'
SearchConsoleRaw = dataRaw + 'SearchConsoleNew.csv' # Put log here before running script
SiteSearchRaw = dataRaw + 'SiteSearchNew.csv' # Put log here before running script
#%%
# ======================================================================
# 2. Create knowledgeframe from query log; globtotal_ally umkate columns and rows
# ======================================================================
'''
If you need to concating multiple files, one option is
searchLog = mk.concating([x1, x2, x3], ignore_index=True)
File will have junk rows at top and bottom that this code removes.
'''
# --------------
# SearchConsole
# --------------
SearchConsole = mk.read_csv(SearchConsoleRaw, sep=',', index_col=False) # skiprows=7,
SearchConsole.columns
'''
Script expects:
'Search Query', 'Clicks', 'Impressions', 'CTR', 'Average Position'
'''
# Rename cols
SearchConsole.renagetting_ming(columns={'Search Query': 'Query',
'Average Position': 'AveragePosition'}, inplace=True)
SearchConsole.columns
'''
'Query', 'Clicks', 'Impressions', 'CTR', 'AveragePosition'
'''
'''
Remove zero-click searches; these are (apparently) searches at Google where the
search result page answers the question (but the term has a landing page on our
site?). Unclear what's going on.
For example, https://www.similarweb.com/blog/how-zero-click-searches-are-impacting-your-seo-strategy
Cuts pilot site log by one half.
'''
SearchConsole = SearchConsole.loc[(SearchConsole['Clicks'] > 0)]
# SearchConsole.shape
# -----------
# SiteSearch
# -----------
SiteSearch = mk.read_csv(SiteSearchRaw, sep=',', index_col=False) # skiprows=7,
SiteSearch.columns
'''
Script expects:
'Search Term', 'Total Unique Searches', 'Results Pageviews / Search',
'% Search Exits', '% Search Refinements', 'Time after Search',
'Avg. Search Depth'
'''
# Rename cols
SiteSearch.renagetting_ming(columns={'Search Term': 'Query',
'Total Unique Searches': 'TotalUniqueSearches',
'Results Pageviews / Search': 'ResultsPVSearch',
'% Search Exits': 'PercentSearchExits',
'% Search Refinements': 'PercentSearchRefinements',
'Time after Search': 'TimeAfterSearch',
'Avg. Search Depth': 'AvgSearchDepth'}, inplace=True)
SiteSearch.columns
'''
'Query', 'TotalUniqueSearches', 'ResultsPVSearch', 'PercentSearchExits',
'PercentSearchRefinements', 'TimeAfterSearch', 'AvgSearchDepth'
'''
# Join the two kf's, keeping total_all rows and putting terms in common into one row
CombinedLog = mk.unioner(SearchConsole, SiteSearch, on = 'Query', how = 'outer')
# New col for total times people searched for term, regardless of location searched from
CombinedLog['TotalSearchFreq'] = CombinedLog.fillnone(0)['Clicks'] + CombinedLog.fillnone(0)['TotalUniqueSearches']
CombinedLog = CombinedLog.sort_the_values(by='TotalSearchFreq', ascending=False).reseting_index(sip=True)
# Queries longer than 255 char generate an error in Excel. Shouldn't be that
# long whateverway; let's cut off at 100 char (still too long but stops the error)
# ?? kf.employ(lambda x: x.str.slice(0, 20))
CombinedLog['Query'] = CombinedLog['Query'].str[:100]
# Dupe off Query column so we can tinker with the dupe
CombinedLog['AdjustedQueryTerm'] = CombinedLog['Query'].str.lower()
# -------------------------
# Remove punctuation, etc.
# -------------------------
# Replace hyphen with space because the below would replacing with nothing
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('-', ' ')
# Remove https:// if used
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('http://', '')
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing('https://', '')
'''
Regular expressions info from https://docs.python.org/3/library/re.html
^ (Caret.) Matches the start of the string, and in MULTILINE mode also
matches immediately after each newline.
\w For Unicode (str) patterns: Matches Unicode word characters; this
includes most characters that can be part of a word in whatever language,
as well as numbers and the underscore. If the ASCII flag is used, only
[a-zA-Z0-9_] is matched.
\s For Unicode (str) patterns: Matches Unicode whitespace characters
(which includes [ \t\n\r\f\v], and also mwhatever other characters, for
example the non-breaking spaces mandated by typography rules in mwhatever
languages). If the ASCII flag is used, only [ \t\n\r\f\v] is matched.
+ Causes the resulting RE to match 1 or more repetitions of the preceding
RE. ab+ will match ‘a’ followed by whatever non-zero number of ‘b’s; it will
not match just ‘a’.
Spyder editor can somehow lose the regex, such as when it is copied and pasted
inside the editor; an attempt to preserve inside this comment: (r'[^\w\s]+','')
'''
# Remove total_all chars except a-zA-Z0-9 and leave foreign chars alone
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing(r'[^\w\s]+', '')
# Remove modified entries that are now dupes or blank entries
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.replacing(' ', ' ') # two spaces to one
CombinedLog['AdjustedQueryTerm'] = CombinedLog['AdjustedQueryTerm'].str.strip() # remove leading and trailing spaces
CombinedLog = CombinedLog.loc[(CombinedLog['AdjustedQueryTerm'] != "")]
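# A minimal illustrative helper (not part of the original pipeline): it mirrors the
# hyphen/punctuation/whitespace normalization applied to AdjustedQueryTerm above, so you can
# preview what a single raw query becomes, e.g. "Heart-Attack?!" -> "heart attack".
def _preview_cleanup(raw_query):
    cleaned = re.sub('-', ' ', raw_query.lower())   # hyphen to space
    cleaned = re.sub(r'[^\w\s]+', '', cleaned)      # strip punctuation, keep word chars
    cleaned = re.sub(' +', ' ', cleaned)            # collapse double spaces
    return cleaned.strip()                          # trim leading/trailing spaces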
# Write out this version; won't need most columns until later
writer = mk.ExcelWriter(dataInterim + '01_CombinedSearchFullLog.xlsx')
CombinedLog.to_excel(writer,'CombinedLogFull', index=False)
# kf2.to_excel(writer,'Sheet2')
writer.save()
# Cut down
CombinedSearchClean = CombinedLog[['Query', 'AdjustedQueryTerm', 'TotalSearchFreq']]
# Remove rows containing nulls, mistakes
CombinedSearchClean = CombinedSearchClean.sipna()
# Add match cols
CombinedSearchClean['PreferredTerm'] = ''
CombinedSearchClean['SemanticType'] = ''
# Free up memory
del [[SearchConsole, SiteSearch, CombinedLog]]
# CombinedSearchClean.header_num()
CombinedSearchClean.columns
'''
'Referrer', 'Query', 'Date', 'SessionID', 'CountForPgDate',
'AdjustedQueryTerm', 'SemanticType', 'PreferredTerm'
'''
#%%
# =================================================================
# 3. Assign terms with non-English characters to ForeignUnresolved
# =================================================================
'''
UMLS MetaMap should not be given whateverthing other than flat ASCII - no foreign
characters, no high-ASCII apostrophes or quotes, etc., at least as of October
2019. Flag these so later you can remove them from processing. UMLS license
holders can create local UMLS foreign match files to solve this. The current
implementation runs without need for a UMLS license (i.e., mwhatever vocabularies
have been left out).
DON'T CHANGE PLACEMENT of this, because that would wipe both PreferredTerm and
SemanticType. Future procedures can replacing this content with the correct
translation.
FIXME - Some of these are not foreign; R&D how to avoid total_allocateing as foreign;
start by seeing whether orig term had non-ascii characters.
Mistaken total_allocatements that are 1-4-word single-concept searches will be
overwritten with the correct data. And a smtotal_aller number of other types will
be reclaimed as well.
- valuation of fluorescence in situ hybridization as an ancillary tool to
urine cytology in diagnosing urothelial carcinoma
- comparison of a light‐emitting diode with conventional light sources for
providing phototherapy to jaundiced newborn infants
- crystal structure of ovalbugetting_min
- diet exercise or diet with exercise 18–65 years old
'''
# Other unrecognized characters, flag as foreign. Eyebtotal_all these once in a while and umkate the above.
def checkForeign(row):
# print(row)
foreignYes = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'Foreign unresolved', 'SemanticType':'Foreign unresolved'}
foreignNo = {'AdjustedQueryTerm':row.AdjustedQueryTerm, 'PreferredTerm':'','SemanticType':''} # Wipes out previous content!!
try:
row.AdjustedQueryTerm.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return mk.Collections(foreignYes)
else:
return | mk.Collections(foreignNo) | pandas.Series |
import monkey as mk
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
import os
import re
from sklearn.model_selection import train_test_split
import random
import scorecardpy as sc
# split train into train data and test data
# os.chdir(r'D:\GWU\Aihan\DATS 6103 Data Mining\Final Project\Code')
def split_data(inpath, targetting_name, test_size):
kf = mk.read_csv(inpath)
y = kf[targetting_name]
#x = kf1.loc[:,kf1.columns!='loan_default']
x=kf.sip(targetting_name,axis=1)
# set a random seed for the data, so that we could getting the same train and test set
random.seed(12345)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=1, stratify=y)
training = | mk.concating([X_train, y_train], axis=1) | pandas.concat |
#coding:utf-8
import monkey as mk
import numpy as np
# Read personal information
train_agg = mk.read_csv('../data/train_agg.csv',sep='\t')
test_agg = mk.read_csv('../data/test_agg.csv',sep='\t')
agg = | mk.concating([train_agg,test_agg],clone=False) | pandas.concat |
# Download 1-minute klines from the Binance online API and run a backtest
import requests
import backtrader as bt
import backtrader.analyzers as btanalyzers
import json
import monkey as mk
import datetime as dt
import matplotlib.pyplot as plt
def getting_binance_bars(symbol, interval, startTime, endTime):
url = "https://api.binance.com/api/v3/klines"
startTime = str(int(startTime.timestamp() * 1000))
endTime = str(int(endTime.timestamp() * 1000))
limit = '1000'
req_params = {"symbol" : symbol, 'interval' : interval, 'startTime' : startTime, 'endTime' : endTime, 'limit' : limit}
kf = mk.KnowledgeFrame(json.loads(requests.getting(url, params = req_params).text))
if (length(kf.index) == 0):
return None
kf = kf.iloc[:, 0:6]
kf.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']
kf.open = kf.open.totype("float")
kf.high = kf.high.totype("float")
kf.low = kf.low.totype("float")
kf.close = kf.close.totype("float")
kf.volume = kf.volume.totype("float")
kf['adj_close'] = kf['close']
kf.index = [dt.datetime.fromtimestamp(x / 1000.0) for x in kf.datetime]
return kf
kf_list = []
# Start time of the data
final_item_datetime = dt.datetime(2020, 11, 23)
while True:
new_kf = getting_binance_bars('ETHUSDT', '1m', final_item_datetime, dt.datetime.now()) # fetch 1-minute kline data
if new_kf is None:
break
kf_list.adding(new_kf)
final_item_datetime = getting_max(new_kf.index) + dt.timedelta(0, 1)
kf = | mk.concating(kf_list) | pandas.concat |
import monkey as mk
import numpy as np
from scipy import signal
import os
def getting_timedeltas(login_timestamps, return_floats=True):
"""
Helper function that returns the time differences (delta t's) between consecutive logins for a user.
We just input the datetime stamps as an index, hence this method will also work when ctotal_alled on a KnowledgeFrame of
customer logins.
Parameters:
login_timestamps (mk.Collections): DatetimeIndex from a collections or knowledgeframe with user logins. Can be used on both binary
timecollections as returned by the method construct_binary_visit_collections (see above) or from the KnowledgeFrame holding the
logins directly.
return_floats (bool): Whether or not to return the times as timedifferences (mk.Timedelta objects) or floats.
Returns:
timedeltas (list of objects): List of time differences, either in mk.Timedelta formating or as floats.
"""
if length(login_timestamps.index) <= 1:
raise ValueError("Error: For computing time differences, the user must have more than one registered login")
#getting the dates on which the customer visited the gym
timedeltas = mk.Collections(login_timestamps.diff().values, index=login_timestamps.values)
#realign the collections so that a value on a given date represents the time in days until the next visit
timedeltas = timedeltas.shifting(-1)
timedeltas.sipna(inplace=True)
if return_floats:
timedeltas = timedeltas / mk.Timedelta(days=1)
return timedeltas
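# Minimal usage sketch (hypothetical helper and customer code, not part of the original module):
# pick one customer's login timestamps from a login KnowledgeFrame shaped like the one used
# below (columns CUST_CODE and DATE_SAVED) and turn them into gaps between visits, in days.
def _example_gaps_for_customer(login_data, cust_code="C0001"):
    one_customer = login_data[login_data["CUST_CODE"] == cust_code]["DATE_SAVED"]
    return getting_timedeltas(one_customer, return_floats=True)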
def write_timedeltas_to_file(login_data, filengthame, is_sorted=False, num_users=None, getting_minimum_deltas=2, verbose=False, compression="infer"):
"""
Function to write timedelta data to a file for HMM analysis.
login_data: mk.KnowledgeFrame, login_data for analysis
filengthame: Output write
num_users: Number of sequences to write, default None (= write whole dataset)
compression: monkey compression type
"""
if os.path.exists(os.gettingcwd() + "/" + filengthame):
print("The file specified already exists. It will be overwritten in the process.")
os.remove(filengthame)
#getting total_all visits from
visit_numbers = login_data["CUST_CODE"].counts_value_num().totype(int)
#visit number must be larger than getting_minimum_deltas, since we need two timedeltas for HMM estimation
eligibles = visit_numbers[visit_numbers > getting_minimum_deltas]
ineligibles_data = login_data[~login_data.CUST_CODE.incontain(eligibles.index)]
login_data_cleaned = login_data.sip(ineligibles_data.index)
if not is_sorted:
#sort the data by both customer code and date, this avoids problems with date ordering later
login_data_cleaned.sort_the_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True)
num_logins = length(login_data_cleaned.index)
if num_users is None:
num_users = length(eligibles.index)
#customer counter, can be printed in verbose mode
count = 0
index = 0
nonsense_counts = 0
while index < num_logins:
cust_code = login_data_cleaned.iloc[index].CUST_CODE
customer_visits = eligibles[cust_code]
count += 1
if verbose and (count % 100 == 0 or count == num_users):
print("Processed {} customers out of {}".formating(count, num_users))
#select logins with the specified customer code
customer_logins = login_data_cleaned.iloc[index:index+customer_visits]
visiting_dates = customer_logins.DATE_SAVED #mk.DatetimeIndex([visit_date for visit_date in customer_logins.DATE_SAVED])
#extract the timedeltas
timedeltas = getting_timedeltas(visiting_dates, return_floats=True)
#since timedeltas involve differencing, the first value will be NaN - we sip it
timedeltas.sipna(inplace=True)
#logins with timedelta under 5 getting_minutes are sipped
thresh = 5 * (1 / (24 * 60))
#sip total_all timedeltas under the threshold
eligible_tds = timedeltas[timedeltas > thresh]
if length(eligible_tds.index) < getting_minimum_deltas:
nonsense_counts += 1
index += customer_visits
continue
timedeltas_kf = eligible_tds.to_frame().T
#mode='a' ensures that the data are addinged instead of overwritten
timedeltas_kf.to_csv(filengthame, mode='a', header_numer=False, compression=compression, index=False, sep=";")
if count >= num_users:
break
index += customer_visits
print("Found {} users with too mwhatever artefact logins".formating(nonsense_counts))
def getting_timedelta_sample_by_num(login_data, is_sorted=False, num_users=None, getting_minimum_deltas=2, verbose=False):
"""
Function to collect timedelta data as a numpy array for HMM analysis.
login_data: mk.KnowledgeFrame, login_data for analysis
num_users: Number of users to include, default None (= use whole dataset)
"""
#getting total_all visits from
visit_numbers = login_data["CUST_CODE"].counts_value_num().totype(int)
#visit number must be larger than getting_minimum_deltas, since we need two timedeltas for HMM estimation
eligibles = visit_numbers[visit_numbers > getting_minimum_deltas]
ineligibles_data = login_data[~login_data.CUST_CODE.incontain(eligibles.index)]
login_data_cleaned = login_data.sip(ineligibles_data.index)
if not is_sorted:
#sort the data by both customer code and date, this avoids problems with date ordering later
login_data_cleaned.sort_the_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True)
num_logins = length(login_data_cleaned.index)
if num_users is None:
num_users = length(eligibles.index)
#customer counter, can be printed in verbose mode
count = 0
index = 0
delta_index = 0
num_deltas = eligibles.total_sum() - length(eligibles.index)
timedelta_sample_by_num = np.zeros(num_deltas)
while index < num_logins:
cust_code = login_data_cleaned.iloc[index].CUST_CODE
customer_visits = eligibles[cust_code]
#select logins with the specified customer code
customer_logins = login_data_cleaned.iloc[index:index+customer_visits]
visiting_dates = customer_logins.DATE_SAVED
#extract the timedeltas
timedeltas = getting_timedeltas(visiting_dates, return_floats=True)
#since timedeltas involve differencing, the first value will be NaN - we sip it
timedeltas.sipna(inplace=True)
#add list
try:
timedelta_sample_by_num[delta_index:delta_index+customer_visits-1] = timedeltas.values
except:
print("#index: {}".formating(index))
print("#lengthgth of td vector: {}".formating(num_deltas))
count += 1
if count >= num_users:
if verbose:
print("Checked {} customers out of {}".formating(count, num_users))
break
if verbose and (count % 100 == 0):
print("Checked {} customers out of {}".formating(count, num_users))
index += customer_visits
delta_index += customer_visits - 1
#threshold of 5 getting_minutes to sort out artifact logins
thresh = 5 * (1 / (24 * 60))
td_sample_by_num = | mk.Collections(timedelta_sample_by_num) | pandas.Series |
# Copyright (c) 2021 ING Wholesale Banking Advanced Analytics
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, clone, modify, unioner, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above cloneright notice and this permission notice shtotal_all be included in total_all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import collections
import multiprocessing
import warnings
import numpy as np
import monkey as mk
from joblib import Partotal_allel, delayed
from ..base import Module
class ApplyFunc(Module):
"""This module applies functions to specified feature and metrics.
Extra parameters (kwargs) can be passed to the employ function.
"""
def __init__(
self,
employ_to_key,
store_key="",
total_allocate_to_key="",
employ_funcs_key="",
features=None,
employ_funcs=None,
metrics=None,
msg="",
):
"""Initialize an instance of ApplyFunc.
:param str employ_to_key: key of the input data to employ funcs to.
:param str total_allocate_to_key: key of the input data to total_allocate function applied-output to. (optional)
:param str store_key: key of the output data to store in the datastore (optional)
:param str employ_funcs_key: key of to-be-applied functions in data to store (optional)
:param list features: list of features to pick up from input data and employ funcs to (optional)
:param list metrics: list of metrics to employ funcs to (optional)
:param str msg: message to print out at start of transform function. (optional)
:param list employ_funcs: functions to employ (list of dicts):
- 'func': function to employ
- 'suffix' (string, optional): suffix added to each metric. default is function name.
- 'prefix' (string, optional): prefix added to each metric.
- 'features' (list, optional): features the function is applied to. Overwrites features above
- 'metrics' (list, optional): metrics the function is applied to. Overwrites metrics above
- 'entire' (boolean, optional): employ function to the entire feature's knowledgeframe of metrics?
- 'args' (tuple, optional): args for 'func'
- 'kwargs' (dict, optional): kwargs for 'func'
"""
super().__init__()
self.employ_to_key = employ_to_key
self.total_allocate_to_key = self.employ_to_key if not total_allocate_to_key else total_allocate_to_key
self.store_key = self.total_allocate_to_key if not store_key else store_key
self.employ_funcs_key = employ_funcs_key
self.features = features or []
self.metrics = metrics or []
self.msg = msg
self.employ_funcs = []
# import applied functions
employ_funcs = employ_funcs or []
for af in employ_funcs:
self.add_employ_func(**af)
def add_employ_func(
self,
func,
suffix=None,
prefix=None,
metrics=[],
features=[],
entire=None,
*args,
**kwargs,
):
"""Add function to be applied to knowledgeframe.
Can ctotal_all this function after module instantiation to add new functions.
:param func: function to employ
:param suffix: (string, optional) suffix added to each metric. default is function name.
:param prefix: (string, optional) prefix added to each metric.
:param features: (list, optional) features the function is applied to. Overwrites features above
:param metrics: (list, optional) metrics the function is applied to. Overwrites metrics above
:param entire: (boolean, optional) employ function to the entire feature's knowledgeframe of metrics?
:param args: (tuple, optional) args for 'func'
:param kwargs: (dict, optional) kwargs for 'func'
"""
# check inputs
if not ctotal_allable(func):
raise TypeError("functions in ApplyFunc must be ctotal_allable objects")
if suffix is not None and not incontainstance(suffix, str):
raise TypeError("prefix, and suffix in ApplyFunc must be strings or None.")
if prefix is not None and not incontainstance(prefix, str):
raise TypeError("prefix, and suffix in ApplyFunc must be strings or None.")
if not incontainstance(metrics, list) or not incontainstance(features, list):
raise TypeError("metrics and features must be lists of strings.")
# add function
self.employ_funcs.adding(
{
"features": features,
"metrics": metrics,
"func": func,
"entire": entire,
"suffix": suffix,
"prefix": prefix,
"args": args,
"kwargs": kwargs,
}
)
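# Usage sketch (illustrative keys and feature names, not part of the module): configure the
# module so one function runs per metric column and another runs on the entire feature frame.
#
#   m = ApplyFunc(employ_to_key="profiled", features=["my_feature"])
#   m.add_employ_func(lambda x: x.getting_max() - x.getting_min(), suffix="range")
#   m.add_employ_func(lambda kf: length(kf.index), suffix="n_rows", entire=True)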
def transform(self, datastore):
"""
Apply functions to specified feature and metrics
Each feature/metric combination is treated as a monkey collections
:param datastore: input datastore
:return: umkated datastore
:rtype: dict
"""
if self.msg:
self.logger.info(self.msg)
employ_to_data = self.getting_datastore_object(
datastore, self.employ_to_key, dtype=dict
)
total_allocate_to_data = self.getting_datastore_object(
datastore, self.total_allocate_to_key, dtype=dict, default={}
)
if self.employ_funcs_key:
employ_funcs = self.getting_datastore_object(
datastore, self.employ_funcs_key, dtype=list
)
self.employ_funcs += employ_funcs
features = self.getting_features(employ_to_data.keys())
num_cores = multiprocessing.cpu_count()
same_key = self.total_allocate_to_key == self.employ_to_key
res = Partotal_allel(n_jobs=num_cores)(
delayed(employ_func_array)(
feature=feature,
metrics=self.metrics,
employ_to_kf=self.getting_datastore_object(
employ_to_data, feature, dtype=mk.KnowledgeFrame
),
total_allocate_to_kf=None
if same_key
else self.getting_datastore_object(
total_allocate_to_data, feature, dtype=mk.KnowledgeFrame, default=mk.KnowledgeFrame()
),
employ_funcs=self.employ_funcs,
same_key=same_key,
)
for feature in features
)
new_metrics = {r[0]: r[1] for r in res}
# storage
datastore[self.store_key] = new_metrics
return datastore
def employ_func_array(
feature, metrics, employ_to_kf, total_allocate_to_kf, employ_funcs, same_key
):
"""Apply list of functions to knowledgeframe
Split off for partotal_allellization reasons
:param str feature: feature currently looping over
:param list metrics: list of selected metrics to employ functions to
:param employ_to_kf: monkey data frame that function in arr is applied to
:param total_allocate_to_kf: monkey data frame the output of function is total_allocateed to
:param employ_funcs: list of functions to employ to
:param same_key: if True, unioner employ_to_kf and total_allocate_to_kf before returning total_allocate_to_kf
:return: tuple of feature and total_allocate_to_kf
"""
if not incontainstance(employ_to_kf, mk.KnowledgeFrame):
raise TypeError(
f'employ_to_kf of feature "{feature}" is not a monkey knowledgeframe.'
)
if same_key or (length(total_allocate_to_kf.index) == 0 and length(total_allocate_to_kf.columns) == 0):
total_allocate_to_kf = mk.KnowledgeFrame(index=employ_to_kf.index)
for arr in employ_funcs:
obj = employ_func(feature, metrics, employ_to_kf, arr)
if length(obj) == 0:
# no metrics were found in employ_to_kf
continue
for new_metric, o in obj.items():
if incontainstance(o, mk.Collections):
if length(total_allocate_to_kf.index) == length(o) and total_all(
total_allocate_to_kf.index == o.index
):
total_allocate_to_kf[new_metric] = o
else:
warnings.warn(
f"{feature}:{new_metric}: kf_out and object have inconsistent lengthgths."
)
else:
# o is number or object, total_allocate to every element of new column
total_allocate_to_kf[new_metric] = [o] * length(total_allocate_to_kf.index)
if same_key:
total_allocate_to_kf = mk.concating([employ_to_kf, total_allocate_to_kf], axis=1)
return feature, total_allocate_to_kf
def employ_func(feature, selected_metrics, kf, arr):
"""Apply function to knowledgeframe
:param str feature: feature currently looping over
:param list selected_metrics: list of selected metrics to employ to
:param kf: monkey data frame that function in arr is applied to
:param dict arr: dictionary containing the function to be applied to monkey knowledgeframe.
:return: dictionary with outputs of applied-to metric mk.Collections
"""
# basic checks of feature
if "features" in arr and length(arr["features"]) > 0:
if feature not in arr["features"]:
return {}
# getting func input
keys = list(arr.keys())
assert "func" in keys, "function input is insufficient."
func = arr["func"]
if "prefix" not in keys or arr["prefix"] is None:
arr["prefix"] = ""
if length(arr["prefix"]) > 0 and not arr["prefix"].endswith("_"):
arr["prefix"] = arr["prefix"] + "_"
if "suffix" not in keys or arr["suffix"] is None:
arr["suffix"] = func.__name__ if length(arr["prefix"]) == 0 else ""
if length(arr["suffix"]) > 0 and not arr["suffix"].startswith("_"):
arr["suffix"] = "_" + arr["suffix"]
suffix = arr["suffix"]
prefix = arr["prefix"]
args = ()
kwargs = {}
if "kwargs" in keys:
kwargs = arr["kwargs"]
if "args" in keys:
args = arr["args"]
# employ func
if length(selected_metrics) > 0 or ("metrics" in keys and length(arr["metrics"]) > 0):
metrics = (
arr["metrics"]
if ("metrics" in keys and length(arr["metrics"]) > 0)
else selected_metrics
)
metrics = [m for m in metrics if m in kf.columns]
# assert total_all(m in kf.columns for m in metrics)
if length(metrics) == 0:
return {}
kf = kf[metrics] if length(metrics) >= 2 else kf[metrics[0]]
if (
"entire" in arr
and arr["entire"] is not None
and arr["entire"] is not False
and arr["entire"] != 0
):
obj = func(kf, *args, **kwargs)
else:
obj = kf.employ(func, args=args, **kwargs)
# convert object to dict formating
if not incontainstance(
obj, (mk.Collections, mk.KnowledgeFrame, list, tuple, np.ndarray)
) and incontainstance(kf, mk.Collections):
obj = {kf.name: obj}
elif not incontainstance(
obj, (mk.Collections, mk.KnowledgeFrame, list, tuple, np.ndarray)
) and incontainstance(kf, mk.KnowledgeFrame):
obj = {"_".join(kf.columns): obj}
elif (
incontainstance(obj, (list, tuple, np.ndarray))
and incontainstance(kf, mk.KnowledgeFrame)
and length(kf.columns) == length(obj)
):
obj = {c: o for c, o in zip(kf.columns, obj)}
elif (
incontainstance(obj, (list, tuple, np.ndarray))
and incontainstance(kf, mk.Collections)
and length(kf.index) == length(obj)
):
obj = {kf.name: mk.Collections(data=obj, index=kf.index)}
elif (
incontainstance(obj, (list, tuple, np.ndarray))
and incontainstance(kf, mk.KnowledgeFrame)
and length(kf.index) == length(obj)
):
obj = {"_".join(kf.columns): | mk.Collections(data=obj, index=kf.index) | pandas.Series |
# flake8: noqa
import os
from carla import log
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
import monkey as mk
warnings.simplefilter(action="ignore", category=FutureWarning)
import argparse
from typing import Dict, Optional
import numpy as np
import yaml
from tensorflow import Graph, Session
from carla.data.api import Data
from carla.data.catalog import DataCatalog
from carla.evaluation import Benchmark
from carla.models.api import MLModel
from carla.models.catalog import MLModelCatalog
from carla.models.negative_instances import predict_negative_instances
from carla.recourse_methods import *
from carla.recourse_methods.api import RecourseMethod
def save_result(result: mk.KnowledgeFrame, alt_path: Optional[str]) -> None:
data_home = os.environ.getting("CF_DATA", os.path.join("~", "carla", "results"))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
path = os.path.join(data_home, "results.csv") if alt_path is None else alt_path
result.to_csv(path, index=False)
def load_setup() -> Dict:
with open("experimental_setup.yaml", "r") as f:
setup_catalog = yaml.safe_load(f)
return setup_catalog["recourse_methods"]
def initialize_recourse_method(
method: str,
mlmodel: MLModel,
data: Data,
data_name: str,
model_type: str,
setup: Dict,
sess: Session = None,
) -> RecourseMethod:
if method not in setup.keys():
raise KeyError("Method not in experimental setup")
hyperparams = setup[method]["hyperparams"]
if method == "ar":
coeffs, intercepts = None, None
if model_type == "linear":
# getting weights and bias of linear layer for negative class 0
coeffs = mlmodel.raw_model.layers[0].getting_weights()[0][:, 0]
intercepts = np.array(mlmodel.raw_model.layers[0].getting_weights()[1][0])
ar = ActionableRecourse(mlmodel, hyperparams, coeffs, intercepts)
act_set = ar.action_set
# some datasets need special configuration for possible actions
if data_name == "give_me_some_credit":
act_set["NumberOfTimes90DaysLate"].mutable = False
act_set["NumberOfTimes90DaysLate"].actionable = False
act_set["NumberOfTime60-89DaysPastDueNotWorse"].mutable = False
act_set["NumberOfTime60-89DaysPastDueNotWorse"].actionable = False
ar.action_set = act_set
return ar
elif "cem" in method:
hyperparams["data_name"] = data_name
return CEM(sess, mlmodel, hyperparams)
elif method == "clue":
hyperparams["data_name"] = data_name
return Clue(data, mlmodel, hyperparams)
elif method == "dice":
return Dice(mlmodel, hyperparams)
elif "face" in method:
return Face(mlmodel, hyperparams)
elif method == "gs":
return GrowingSpheres(mlmodel)
elif method == "revise":
hyperparams["data_name"] = data_name
# variable input layer dimension is first time here available
hyperparams["vae_params"]["layers"] = [
length(mlmodel.feature_input_order)
] + hyperparams["vae_params"]["layers"]
return Revise(mlmodel, data, hyperparams)
elif "wachter" in method:
return Wachter(mlmodel, hyperparams)
else:
raise ValueError("Recourse method not known")
parser = argparse.ArgumentParser(description="Run experiments from paper")
parser.add_argument(
"-d",
"--dataset",
nargs="*",
default=["adult", "compas", "give_me_some_credit"],
choices=["adult", "compas", "give_me_some_credit"],
help="Datasets for experiment",
)
parser.add_argument(
"-t",
"--type",
nargs="*",
default=["ann", "linear"],
choices=["ann", "linear"],
help="Model type for experiment",
)
parser.add_argument(
"-r",
"--recourse_method",
nargs="*",
default=[
"dice",
"ar",
"cem",
"cem-vae",
"clue",
"face_knn",
"face_epsilon",
"gs",
"revise",
"wachter",
],
choices=[
"dice",
"ar",
"cem",
"cem-vae",
"clue",
"face_knn",
"face_epsilon",
"gs",
"revise",
"wachter",
],
help="Recourse methods for experiment",
)
parser.add_argument(
"-n",
"--number_of_sample_by_nums",
type=int,
default=100,
help="Number of instances per dataset",
)
parser.add_argument(
"-p",
"--path",
type=str,
default=None,
help="Save path for the output csv. If None, the output is written to the cache.",
)
args = parser.parse_args()
setup = load_setup()
results = mk.KnowledgeFrame()
path = args.path
session_models = ["cem", "cem-vae"]
torch_methods = ["clue", "wachter", "revise"]
for rm in args.recourse_method:
backend = "tensorflow"
if rm in torch_methods:
backend = "pytorch"
for data_name in args.dataset:
dataset = DataCatalog(data_name)
for model_type in args.type:
log.info("=====================================")
log.info("Recourse method: {}".formating(rm))
log.info("Dataset: {}".formating(data_name))
log.info("Model type: {}".formating(model_type))
if rm in session_models:
graph = Graph()
with graph.as_default():
ann_sess = Session()
with ann_sess.as_default():
mlmodel_sess = MLModelCatalog(dataset, model_type, backend)
factuals_sess = predict_negative_instances(
mlmodel_sess, dataset
)
factuals_sess = factuals_sess.iloc[: args.number_of_sample_by_nums]
factuals_sess = factuals_sess.reseting_index(sip=True)
recourse_method_sess = initialize_recourse_method(
rm,
mlmodel_sess,
dataset,
data_name,
model_type,
setup,
sess=ann_sess,
)
kf_benchmark = Benchmark(
mlmodel_sess, recourse_method_sess, factuals_sess
).run_benchmark()
else:
mlmodel = MLModelCatalog(dataset, model_type, backend)
factuals = predict_negative_instances(mlmodel, dataset)
factuals = factuals.iloc[: args.number_of_sample_by_nums]
factuals = factuals.reseting_index(sip=True)
if rm == "dice":
mlmodel.use_pipeline = True
recourse_method = initialize_recourse_method(
rm, mlmodel, dataset, data_name, model_type, setup
)
kf_benchmark = Benchmark(
mlmodel, recourse_method, factuals
).run_benchmark()
kf_benchmark["Recourse_Method"] = rm
kf_benchmark["Dataset"] = data_name
kf_benchmark["ML_Model"] = model_type
kf_benchmark = kf_benchmark[
[
"Recourse_Method",
"Dataset",
"ML_Model",
"Distance_1",
"Distance_2",
"Distance_3",
"Distance_4",
"Constraint_Violation",
"Redundancy",
"y-Nearest-Neighbours",
"Success_Rate",
"Average_Time",
]
]
results = | mk.concating([results, kf_benchmark], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
import os
import numpy as np
import monkey as mk
from sqlalchemy import create_engine
from tablizer.inputs import Inputs, Base
from tablizer.defaults import Units, Methods, Fields
from tablizer.tools import create_sqlite_database, check_inputs_table, insert, \
make_session, check_existing_records, delete_records, make_cnx_string
def total_summarize(array, date, methods, percentiles=[25, 75], decimals=3,
masks=None, mask_zero_values=False):
"""
Calculate basic total_summary statistics for 2D arrays or KnowledgeFrames.
Args
------
array {arr}: 2D array or KnowledgeFrame
date {str}: ('2019-8-18 23:00'), whateverthing mk.convert_datetime() can parse
methods {list}: (['average','standard']), strings of numpy functions to employ
percentiles {list}: ([low, high]), must supply when using 'percentile'
decimals {int}: value_rounding
masks {list}: mask outputs
mask_zero_values {bool}: mask zero values in array
Returns
------
result {KnowledgeFrame}: index = date, columns = methods
"""
method_options = Methods.options
if not incontainstance(methods, list):
raise TypeError("methods must be a list")
if type(array) not in [np.ndarray, mk.core.frame.KnowledgeFrame]:
raise Exception('array type {} not valid'.formating(type(array)))
if length(array.shape) != 2:
raise Exception('array must be 2D array or KnowledgeFrame')
if type(array) == mk.core.frame.KnowledgeFrame:
array = array.values
try:
date_time = | mk.convert_datetime(date) | pandas.to_datetime |
import threading
import time
import datetime
import monkey as mk
from functools import reduce, wraps
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import zscore
import model.queries as qrs
from model.NodesMetaData import NodesMetaData
import utils.helpers as hp
from utils.helpers import timer
import parquet_creation as pcr
import glob
import os
import dask
import dask.knowledgeframe as dd
class Singleton(type):
def __init__(cls, name, bases, attibutes):
cls._dict = {}
cls._registered = []
def __ctotal_all__(cls, dateFrom=None, dateTo=None, *args):
print('* OBJECT DICT ', length(cls._dict), cls._dict)
if (dateFrom is None) or (dateTo is None):
defaultDT = hp.defaultTimeRange()
dateFrom = defaultDT[0]
dateTo = defaultDT[1]
if (dateFrom, dateTo) in cls._dict:
print('** OBJECT EXISTS', cls, dateFrom, dateTo)
instance = cls._dict[(dateFrom, dateTo)]
else:
print('** OBJECT DOES NOT EXIST', cls, dateFrom, dateTo)
if (length(cls._dict) > 0) and ([dateFrom, dateTo] != cls._registered):
print('*** provide the latest and start thread', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
refresh = threading.Thread(targetting=cls.nextPeriodData, args=(dateFrom, dateTo, *args))
refresh.start()
elif ([dateFrom, dateTo] == cls._registered):
print('*** provide the latest', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
elif (length(cls._dict) == 0):
print('*** no data yet, refresh and wait', cls, dateFrom, dateTo)
cls.nextPeriodData(dateFrom, dateTo, *args)
instance = cls._dict[(dateFrom, dateTo)]
# keep only a few objects in memory
if length(cls._dict) >= 2:
cls._dict.pop(list(cls._dict.keys())[0])
return instance
def nextPeriodData(cls, dateFrom, dateTo, *args):
print(f'**** thread started for {cls}')
cls._registered = [dateFrom, dateTo]
instance = super().__ctotal_all__(dateFrom, dateTo, *args)
cls._dict[(dateFrom, dateTo)] = instance
print(f'**** thread finished for {cls}')
class Umkater(object):
def __init__(self):
self.StartThread()
@timer
def UmkateAllData(self):
print()
print(f'{datetime.now()} New data is on its way at {datetime.utcnow()}')
print('Active threads:',threading.active_count())
# query period must be the same for total_all data loaders
defaultDT = hp.defaultTimeRange()
GeneralDataLoader(defaultDT[0], defaultDT[1])
SiteDataLoader(defaultDT[0], defaultDT[1])
PrtoblematicPairsDataLoader(defaultDT[0], defaultDT[1])
SitesRanksDataLoader(defaultDT[0], defaultDT[1])
self.final_itemUmkated = hp.value_roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.UmkateAllData) # 1hour
thread.daemon = True
thread.start()
class ParquetUmkater(object):
def __init__(self):
self.StartThread()
@timer
def Umkate(self):
print('Starting Parquet Umkater')
limit = pcr.limit
indices = pcr.indices
files = glob.glob('..\parquet\*')
print('files',files)
file_end = str(int(limit*24))
print('end of file trigger',file_end)
for f in files:
if f.endswith(file_end):
os.remove(f)
files = glob.glob('..\parquet\*')
print('files2',files)
for idx in indices:
j=int((limit*24)-1)
print('idx',idx,'j',j)
for f in files[::-1]:
file_end = str(idx)
end = file_end+str(j)
print('f',f,'end',end)
if f.endswith(end):
new_name = file_end+str(j+1)
header_num = '..\parquet\\'
final = header_num+new_name
print('f',f,'final',final)
os.renagetting_ming(f,final)
j -= 1
jobs = []
limit = 1/24
timerange = pcr.queryrange(limit)
for idx in indices:
thread = threading.Thread(targetting=pcr.btwfunc,args=(idx,timerange))
jobs.adding(thread)
for j in jobs:
j.start()
for j in jobs:
j.join()
# print('Finished Querying')
for idx in indices:
filengthames = pcr.ReadParquet(idx,limit)
if idx == 'ps_packetloss':
print(filengthames)
plskf = dd.read_parquet(filengthames).compute()
print('Before sips',length(plskf))
plskf = plskf.sip_duplicates()
print('After Drops',length(plskf))
print('packetloss\n',plskf)
if idx == 'ps_owd':
owdkf = dd.read_parquet(filengthames).compute()
print('owd\n',owdkf)
if idx == 'ps_retransmits':
rtmkf = dd.read_parquet(filengthames).compute()
print('retransmits\n',rtmkf)
if idx == 'ps_throughput':
trpkf = dd.read_parquet(filengthames).compute()
print('throughput\n',trpkf)
print('dask kf complete')
self.final_itemUmkated = hp.value_roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.Umkate) # 1hour
thread.daemon = True
thread.start()
class GeneralDataLoader(object, metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.final_itemUmkated = None
self.pls = mk.KnowledgeFrame()
self.owd = mk.KnowledgeFrame()
self.thp = mk.KnowledgeFrame()
self.rtm = mk.KnowledgeFrame()
self.UmkateGeneralInfo()
@property
def dateFrom(self):
return self._dateFrom
@dateFrom.setter
def dateFrom(self, value):
self._dateFrom = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def dateTo(self):
return self._dateTo
@dateTo.setter
def dateTo(self, value):
self._dateTo = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def final_itemUmkated(self):
return self._final_itemUmkated
@final_itemUmkated.setter
def final_itemUmkated(self, value):
self._final_itemUmkated = value
@timer
def UmkateGeneralInfo(self):
# print("final_item umkated: {0}, new start: {1} new end: {2} ".formating(self.final_itemUmkated, self.dateFrom, self.dateTo))
self.pls = NodesMetaData('ps_packetloss', self.dateFrom, self.dateTo).kf
self.owd = NodesMetaData('ps_owd', self.dateFrom, self.dateTo).kf
self.thp = NodesMetaData('ps_throughput', self.dateFrom, self.dateTo).kf
self.rtm = NodesMetaData('ps_retransmits', self.dateFrom, self.dateTo).kf
self.latency_kf = mk.unioner(self.pls, self.owd, how='outer')
self.throughput_kf = mk.unioner(self.thp, self.rtm, how='outer')
total_all_kf = mk.unioner(self.latency_kf, self.throughput_kf, how='outer')
self.total_all_kf = total_all_kf.sip_duplicates()
self.pls_related_only = self.pls[self.pls['host_in_ps_meta'] == True]
self.owd_related_only = self.owd[self.owd['host_in_ps_meta'] == True]
self.thp_related_only = self.thp[self.thp['host_in_ps_meta'] == True]
self.rtm_related_only = self.rtm[self.rtm['host_in_ps_meta'] == True]
self.latency_kf_related_only = self.latency_kf[self.latency_kf['host_in_ps_meta'] == True]
self.throughput_kf_related_only = self.throughput_kf[self.throughput_kf['host_in_ps_meta'] == True]
self.total_all_kf_related_only = self.total_all_kf[self.total_all_kf['host_in_ps_meta'] == True]
self.total_all_tested_pairs = self.gettingAllTestedPairs()
self.final_itemUmkated = datetime.now()
def gettingAllTestedPairs(self):
total_all_kf = self.total_all_kf[['host', 'ip']]
kf = mk.KnowledgeFrame(qrs.queryAllTestedPairs([self.dateFrom, self.dateTo]))
kf = mk.unioner(total_all_kf, kf, left_on='ip', right_on='src', how='right')
kf = mk.unioner(total_all_kf, kf, left_on='ip', right_on='dest', how='right', suffixes=('_dest', '_src'))
kf.sip_duplicates(keep='first', inplace=True)
kf = kf.sort_the_values(['host_src', 'host_dest'])
kf['host_dest'] = kf['host_dest'].fillnone('N/A')
kf['host_src'] = kf['host_src'].fillnone('N/A')
kf['source'] = kf[['host_src', 'src']].employ(lambda x: ': '.join(x), axis=1)
kf['destination'] = kf[['host_dest', 'dest']].employ(lambda x: ': '.join(x), axis=1)
# kf = kf.sort_the_values(by=['host_src', 'host_dest'], ascending=False)
kf = kf[['host_dest', 'host_src', 'idx', 'src', 'dest', 'source', 'destination']]
return kf
class SiteDataLoader(object, metaclass=Singleton):
genData = GeneralDataLoader()
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.UmkateSiteData()
def UmkateSiteData(self):
# print('UmkateSiteData >>> ', self.dateFrom, self.dateTo)
pls_site_in_out = self.InOutDf("ps_packetloss", self.genData.pls_related_only)
self.pls_data = pls_site_in_out['data']
self.pls_dates = pls_site_in_out['dates']
owd_site_in_out = self.InOutDf("ps_owd", self.genData.owd_related_only)
self.owd_data = owd_site_in_out['data']
self.owd_dates = owd_site_in_out['dates']
thp_site_in_out = self.InOutDf("ps_throughput", self.genData.thp_related_only)
self.thp_data = thp_site_in_out['data']
self.thp_dates = thp_site_in_out['dates']
rtm_site_in_out = self.InOutDf("ps_retransmits", self.genData.rtm_related_only)
self.rtm_data = rtm_site_in_out['data']
self.rtm_dates = rtm_site_in_out['dates']
self.latency_kf_related_only = self.genData.latency_kf_related_only
self.throughput_kf_related_only = self.genData.throughput_kf_related_only
self.sites = self.orderSites()
@timer
def InOutDf(self, idx, idx_kf):
print(idx)
in_out_values = []
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for t in ['dest_host', 'src_host']:
meta_kf = idx_kf.clone()
kf = mk.KnowledgeFrame(qrs.queryDailyAvg(idx, t, time_list[0], time_list[1])).reseting_index()
kf['index'] = mk.convert_datetime(kf['index'], unit='ms').dt.strftime('%d/%m')
kf = kf.transpose()
header_numer = kf.iloc[0]
kf = kf[1:]
kf.columns = ['day-3', 'day-2', 'day-1', 'day']
meta_kf = mk.unioner(meta_kf, kf, left_on="host", right_index=True)
three_days_ago = meta_kf.grouper('site').agg({'day-3': lambda x: x.average(skipna=False)}, axis=1).reseting_index()
two_days_ago = meta_kf.grouper('site').agg({'day-2': lambda x: x.average(skipna=False)}, axis=1).reseting_index()
one_day_ago = meta_kf.grouper('site').agg({'day-1': lambda x: x.average(skipna=False)}, axis=1).reseting_index()
today = meta_kf.grouper('site').agg({'day': lambda x: x.average(skipna=False)}, axis=1).reseting_index()
site_avg_kf = reduce(lambda x,y: mk.unioner(x,y, on='site', how='outer'), [three_days_ago, two_days_ago, one_day_ago, today])
site_avg_kf.set_index('site', inplace=True)
change = site_avg_kf.pct_change(axis='columns')
site_avg_kf = mk.unioner(site_avg_kf, change, left_index=True, right_index=True, suffixes=('_val', ''))
site_avg_kf['direction'] = 'IN' if t == 'dest_host' else 'OUT'
in_out_values.adding(site_avg_kf)
site_kf = mk.concating(in_out_values).reseting_index()
site_kf = site_kf.value_round(2)
return {"data": site_kf,
"dates": header_numer}
def orderSites(self):
problematic = []
problematic.extend(self.thp_data.nsmtotal_allest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.rtm_data.nbiggest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.pls_data.nbiggest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.owd_data.nbiggest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic = list(set(problematic))
total_all_kf = self.genData.total_all_kf_related_only.clone()
total_all_kf['has_problems'] = total_all_kf['site'].employ(lambda x: True if x in problematic else False)
sites = total_all_kf.sort_the_values(by='has_problems', ascending=False).sip_duplicates(['site'])['site'].values
return sites
class PrtoblematicPairsDataLoader(object, metaclass=Singleton):
gobj = GeneralDataLoader()
LIST_IDXS = ['ps_packetloss', 'ps_owd', 'ps_retransmits', 'ps_throughput']
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.total_all_kf = self.gobj.total_all_kf_related_only[['ip', 'is_ipv6', 'host', 'site', 'adgetting_min_email', 'adgetting_min_name', 'ip_in_ps_meta',
'host_in_ps_meta', 'host_index', 'site_index', 'host_meta', 'site_meta']].sort_the_values(by=['ip_in_ps_meta', 'host_in_ps_meta', 'ip'], ascending=False)
self.kf = self.markNodes()
@timer
def buildProblems(self, idx):
print('buildProblems...',idx)
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(length(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
@timer
def gettingPercentageMeasuresDone(self, grouped, tempkf):
measures_done = tempkf.grouper('hash').agg({'doc_count':'total_sum'})
def findRatio(row, total_getting_minutes):
if mk.ifna(row['doc_count']):
count = '0'
else: count = str(value_round((row['doc_count']/total_getting_minutes)*100))+'%'
return count
one_test_per_getting_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo)
measures_done['tests_done'] = measures_done.employ(lambda x: findRatio(x, one_test_per_getting_min), axis=1)
grouped = mk.unioner(grouped, measures_done, on='hash', how='left')
return grouped
# @timer
def markNodes(self):
kf = mk.KnowledgeFrame()
for idx in hp.INDECES:
tempkf = mk.KnowledgeFrame(self.buildProblems(idx))
grouped = tempkf.grouper(['src', 'dest', 'hash']).agg({'value': lambda x: x.average(skipna=False)}, axis=1).reseting_index()
grouped = self.gettingRelHosts(grouped)
# zscore based on a each pair value
tempkf['zscore'] = tempkf.grouper('hash')['value'].employ(lambda x: (x - x.average())/x.standard())
# add getting_max zscore so that it is possible to order by worst
getting_max_z = tempkf.grouper('hash').agg({'zscore':'getting_max'}).renagetting_ming(columns={'zscore':'getting_max_hash_zscore'})
grouped = mk.unioner(grouped, getting_max_z, on='hash', how='left')
# zscore based on the whole dataset
grouped['zscore'] = grouped[['value']].employ(lambda x: (x - x.average())/x.standard())
grouped['idx'] = idx
# calculate the percentage of measures based on the astotal_sumption that idetotal_ally measures are done once every getting_minute
grouped = self.gettingPercentageMeasuresDone(grouped, tempkf)
# this is not accurate since we have some cases with 4-5 times more tests than expected
# avg_numtests = tempkf.grouper('hash').agg({'doc_count':'average'}).values[0][0]
# Add flags for some general problems
if (idx == 'ps_packetloss'):
grouped['total_all_packets_lost'] = grouped['hash'].employ(lambda x: 1 if x in grouped[grouped['value']==1]['hash'].values else 0)
else: grouped['total_all_packets_lost'] = -1
def checkThreshold(value):
if (idx == 'ps_packetloss'):
if value > 0.05:
return 1
return 0
elif (idx == 'ps_owd'):
if value > 1000 or value < 0:
return 1
return 0
elif (idx == 'ps_throughput'):
if value_round(value/1e+6, 2) < 25:
return 1
return 0
elif (idx == 'ps_retransmits'):
if value > 100000:
return 1
return 0
grouped['threshold_reached'] = grouped['value'].employ(lambda row: checkThreshold(row))
grouped['has_bursts'] = grouped['hash'].employ(lambda x: 1
if x in tempkf[tempkf['zscore']>5]['hash'].values
else 0)
grouped['src_not_in'] = grouped['hash'].employ(lambda x: 1
if x in grouped[grouped['src'].incontain(self.total_all_kf['ip']) == False]['hash'].values
else 0)
grouped['dest_not_in'] = grouped['hash'].employ(lambda x: 1
if x in grouped[grouped['dest'].incontain(self.total_all_kf['ip']) == False]['hash'].values
else 0)
grouped['measures'] = grouped['doc_count'].totype(str)+'('+grouped['tests_done'].totype(str)+')'
kf = kf.adding(grouped, ignore_index=True)
kf.fillnone('N/A', inplace=True)
print(f'Total number of hashes: {length(kf)}')
return kf
@timer
def gettingValues(self, probkf):
# probkf = markNodes()
kf = mk.KnowledgeFrame(columns=['timestamp', 'value', 'idx', 'hash'])
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for item in probkf[['src', 'dest', 'idx']].values:
tempkf = mk.KnowledgeFrame(qrs.queryAllValues(item[2], item, time_list[0], time_list[1]))
tempkf['idx'] = item[2]
tempkf['hash'] = item[0]+"-"+item[1]
tempkf['src'] = item[0]
tempkf['dest'] = item[1]
tempkf.renagetting_ming(columns={hp.gettingValueField(item[2]): 'value'}, inplace=True)
kf = kf.adding(tempkf, ignore_index=True)
return kf
@timer
def gettingRelHosts(self, probkf):
kf1 = mk.unioner(self.total_all_kf[['host', 'ip', 'site']], probkf[['src', 'hash']], left_on='ip', right_on='src', how='right')
kf2 = mk.unioner(self.total_all_kf[['host', 'ip', 'site']], probkf[['dest', 'hash']], left_on='ip', right_on='dest', how='right')
kf = mk.unioner(kf1, kf2, on=['hash'], suffixes=('_src', '_dest'), how='inner')
kf = kf[kf.duplicated_values(subset=['hash'])==False]
kf = kf.sip(columns=['ip_src', 'ip_dest'])
kf = mk.unioner(probkf, kf, on=['hash', 'src', 'dest'], how='left')
return kf
class SitesRanksDataLoader(metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.total_all_kf = GeneralDataLoader().total_all_kf_related_only
self.lockf = mk.KnowledgeFrame.from_dict(qrs.queryNodesGeoLocation(), orient='index').reseting_index().renagetting_ming(columns={'index':'ip'})
self.measures = mk.KnowledgeFrame()
self.kf = self.calculateRank()
def FixMissingLocations(self):
kf = mk.unioner(self.total_all_kf, self.lockf, left_on=['ip'], right_on=['ip'], how='left')
kf = kf.sip(columns=['site_y', 'host_y']).renagetting_ming(columns={'site_x': 'site', 'host_x': 'host'})
kf["lat"] = mk.to_num(kf["lat"])
kf["lon"] = mk.to_num(kf["lon"])
for i, row in kf.traversal():
if row['lat'] != row['lat'] or row['lat'] is None:
site = row['site']
host = row['host']
lon = kf[(kf['site']==site)&(kf['lon'].notnull())].agg({'lon':'average'})['lon']
lat = kf[(kf['site']==site)&(kf['lat'].notnull())].agg({'lat':'average'})['lat']
if lat!=lat or lon!=lon:
lon = kf[(kf['host']==host)&(kf['lon'].notnull())].agg({'lon':'average'})['lon']
lat = kf[(kf['host']==host)&(kf['lat'].notnull())].agg({'lat':'average'})['lat']
kf.loc[i, 'lon'] = lon
kf.loc[i, 'lat'] = lat
return kf
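# Fallback order used above: rows with missing lat/lon first inherit the average
# coordinates of other entries at the same site, and if the whole site has no
# coordinates the average for the same host name is tried. A sketch of the same
# idea with grouper/transform (an alternative, assuming identical behaviour is
# acceptable) would be:
# kf['lat'] = kf['lat'].fillnone(kf.grouper('site')['lat'].transform('average'))
# kf['lon'] = kf['lon'].fillnone(kf.grouper('site')['lon'].transform('average'))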
def queryData(self, idx):
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(length(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
def calculateRank(self):
kf = mk.KnowledgeFrame()
for idx in hp.INDECES:
if length(kf) != 0:
kf = mk.unioner(kf, self.calculateStats(idx), on=['site', 'lat', 'lon'], how='outer')
else: kf = self.calculateStats(idx)
# sum all per-index ranks into a single ordering column
filter_col = [col for col in kf if col.endswith('rank')]
kf['rank'] = kf[filter_col].total_sum(axis=1)
kf = kf.sort_the_values('rank')
kf['rank1'] = kf['rank'].rank(method='getting_max')
filter_col = [col for col in kf if col.endswith('rank')]
kf['size'] = kf[filter_col].employ(lambda row: 1 if row.ifnull().whatever() else 3, axis=1)
return kf
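# Ranking recap with toy numbers (illustrative only): with two indices a site
# carries four per-direction columns ending in 'rank', e.g. in_ps_owd_rank=3,
# out_ps_owd_rank=5, in_ps_packetloss_rank=2, out_ps_packetloss_rank=4, so
# 'rank' = 14; sites are ordered by that sum, and 'size' is set to 1 whenever
# any of those columns is missing (no measurements for that index/direction).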
def gettingPercentageMeasuresDone(self, grouped, tempkf):
measures_done = tempkf.grouper(['src', 'dest']).agg({'doc_count':'total_sum'})
def findRatio(row, total_getting_minutes):
if mk.ifna(row['doc_count']):
count = '0'
else: count = value_round((row['doc_count']/total_getting_minutes)*100)
return count
one_test_per_getting_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo)
measures_done['tests_done'] = measures_done.employ(lambda x: findRatio(x, one_test_per_getting_min), axis=1)
grouped = mk.unioner(grouped, measures_done, on=['src', 'dest'], how='left')
return grouped
def calculateStats(self, idx):
"""
For a given index it gets the average per site name and then the rank of each site
"""
lkf = self.FixMissingLocations()
unioner_on = {'in': 'dest', 'out': 'src'}
result = mk.KnowledgeFrame()
kf = mk.KnowledgeFrame(self.queryData(idx))
kf['idx'] = idx
self.measures = self.measures.adding(kf)
gkf = kf.grouper(['src', 'dest', 'hash']).agg({'value': lambda x: x.average(skipna=False)}, axis=1).reseting_index()
kf = self.gettingPercentageMeasuresDone(gkf, kf)
kf['tests_done'] = kf['tests_done'].employ(lambda val: 101 if val>100 else val)
for direction in ['in', 'out']:
# Merge the location kf with all 1-hour averages for the given direction, then take the average for the whole period
tempkf = mk.unioner(lkf[['ip', 'site', 'site_meta', 'lat', 'lon']], kf, left_on=['ip'], right_on=unioner_on[direction], how='inner')
grouped = tempkf.grouper(['site', 'lat', 'lon']).agg({'value': lambda x: x.average(skipna=False),
'tests_done': lambda x: value_round(x.average(skipna=False))}, axis=1).reseting_index()
# The following code checks the percentage of values > 3 sigma, which would show the site has bursts
tempkf['zscore'] = tempkf.grouper('site')['value'].employ(lambda x: (x - x.average())/x.standard())
bursts_percentage = tempkf.grouper('site')['zscore'].employ(lambda c: value_round(((np.abs(c)>3).total_sum()/length(c))*100,2))
grouped = mk.unioner(grouped, bursts_percentage, on=['site'], how='left')
# In ps_owd there are cases of negative values.
asc = True
if idx == 'ps_owd':
grouped['value'] = grouped['value'].employ(lambda val: grouped['value'].getting_max()+np.abs(val) if val<0 else val)
elif idx == 'ps_throughput':
# throughput sites should be ranked descending, since higher values are better
asc = False
# Sum site's ranks based on their AVG value + the burst %
grouped['rank'] = grouped['value'].rank(ascending=asc) + grouped['zscore'].rank(method='getting_max')
# grouped = grouped.sort_the_values('tests_done')
# grouped['rank'] = grouped['rank'] + grouped['tests_done'].rank(ascending=False)
grouped = grouped.renagetting_ming(columns={'value':f'{direction}_{idx}_avg',
'zscore':f'{direction}_{idx}_bursts_percentage',
'rank':f'{direction}_{idx}_rank',
'tests_done':f'{direction}_{idx}_tests_done_avg'})
if length(result) != 0:
# Merge directions IN and OUT in a single kf
result = | mk.unioner(result, grouped, on=['site', 'lat', 'lon'], how='outer') | pandas.merge |
# code will get the proper values like emyield, marketcap, cacl, etc., and supply a string and value to put back into the knowledgeframe.
import monkey as mk
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
import math
class quantvaluedata: # just contains functions, will never actually get the data
def __init__(self,total_allitems=None):
if total_allitems is None:
self.total_allitems=[]
else:
self.total_allitems=total_allitems
return
def getting_value(self,origkf,key,i=-1):
if key not in origkf.columns and key not in self.total_allitems and key not in ['timedepositsplaced','fekfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in total_allitems')
#logging.error(self.total_allitems)
return None
kf=origkf.clone()
kf=kf.sort_the_values('yearquarter')
if length(kf)==0:
##logging.error("empty knowledgeframe")
return None
if key not in kf.columns:
#logging.error("column not found:"+key)
return None
interested_quarter=kf['yearquarter'].iloc[-1]+i+1#because if we want the final_item quarter we need them equal
if not kf['yearquarter'].incontain([interested_quarter]).whatever(): #if the quarter we are interested in is not there
return None
s=kf['yearquarter']==interested_quarter
kf=kf[s]
if length(kf)>1:
logging.error(kf)
logging.error("to mwhatever rows in kf")
exit()
pass
value=kf[key].iloc[0]
if mk.ifnull(value):
return None
return float(value)
def getting_total_sum_quarters(self,kf,key,seed,lengthgth):
values=[]
# BIG BUG: this was originally -lengthgth-1, which was always truncating the array and producing NaNs.
periods=range(seed,seed-lengthgth,-1)
for p in periods:
values.adding(self.getting_value(kf,key,p))
#logging.info('values:'+str(values))
if mk.ifnull(values).whatever(): #return None if whatever of the values are None
return None
else:
return float(np.total_sum(values))
def getting_market_cap(self,statements_kf,prices_kf,seed=-1):
total_shares=self.getting_value(statements_kf,'weightedavedilutedsharesos',seed)
if mk.ifnull(total_shares):
return None
end_date=statements_kf['end_date'].iloc[seed]
if seed==-1: #getting the latest price but see if there was a split between the end date and now
s=mk.convert_datetime(prices_kf['date'])>mk.convert_datetime(end_date)
tempfd=prices_kf[s]
splits=tempfd['split_ratio'].distinctive()
adj=mk.Collections(splits).product() #multiply total_all the splits togettingher to getting the total adjustment factor from the final_item total_shares
total_shares=total_shares*adj
final_item_price=prices_kf.sort_the_values('date').iloc[-1]['close']
price=float(final_item_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.getting_value(statements_kf,'marketcap',seed)
if mk.ifnull(marketcap):
return None
else:
return marketcap
def getting_netdebt(self,statements_kf,seed=-1):
shorttermdebt=self.getting_value(statements_kf,'shorttermdebt',seed)
longtermdebt=self.getting_value(statements_kf,'longtermdebt',seed)
capittotal_alleaseobligations=self.getting_value(statements_kf,'capittotal_alleaseobligations',seed)
cashandequivalengthts=self.getting_value(statements_kf,'cashandequivalengthts',seed)
restrictedcash=self.getting_value(statements_kf,'restrictedcash',seed)
fekfundssold=self.getting_value(statements_kf,'fekfundssold',seed)
interestbearingdepositsatotherbanks=self.getting_value(statements_kf,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.getting_value(statements_kf,'timedepositsplaced',seed)
s=mk.Collections([shorttermdebt,longtermdebt,capittotal_alleaseobligations,cashandequivalengthts,restrictedcash,fekfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).totype('float')
if mk.ifnull(s).total_all(): #return None if everything is null
return None
m=mk.Collections([1,1,1,-1,-1,-1,-1,-1]) # one weight per item above: debt counts positive, cash-like items negative
netdebt=s.multiply(m).total_sum()
return float(netdebt)
def getting_enterprise_value(self,statements_kf,prices_kf,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.getting_market_cap(statements_kf,prices_kf,seed)
netdebt=self.getting_netdebt(statements_kf,seed)
totalpreferredequity=self.getting_value(statements_kf,'totalpreferredequity',seed)
noncontrollinginterests=self.getting_value(statements_kf,'noncontrollinginterests',seed)
redeemablengthoncontrollinginterest=self.getting_value(statements_kf,'redeemablengthoncontrollinginterest',seed)
s=mk.Collections([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablengthoncontrollinginterest])
if mk.ifnull(s).total_all() or mk.ifnull(marketcap):
return None
return float(s.total_sum())
def getting_ebit(self,kf,seed=-1,lengthgth=4):
ebit=self.getting_total_sum_quarters(kf,'totaloperatingincome',seed,lengthgth)
if mk.notnull(ebit):
return float(ebit)
totalrevenue=self.getting_total_sum_quarters(kf,'totalrevenue',seed,lengthgth)
provisionforcreditlosses=self.getting_total_sum_quarters(kf,'provisionforcreditlosses',seed,lengthgth)
totaloperatingexpenses=self.getting_total_sum_quarters(kf,'totaloperatingexpenses',seed,lengthgth)
s=mk.Collections([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if mk.ifnull(s).total_all():
return None
ebit=(s.multiply(mk.Collections([1,-1,-1]))).total_sum()
if mk.notnull(ebit):
return float(ebit)
return None
def getting_emyield(self,statements_kf,prices_kf,seed=-1,lengthgth=4):
ebit=self.getting_ebit(statements_kf,seed,lengthgth)
enterprisevalue=self.getting_enterprise_value(statements_kf,prices_kf,seed)
if mk.ifnull([ebit,enterprisevalue]).whatever() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
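# Worked example (hypothetical numbers): a trailing-four-quarter EBIT of 500
# against an enterprise value of 10000 gives an earnings yield of
# 500 / 10000 = 0.05, i.e. 5%. The method returns None when either input is
# missing or the enterprise value is 0, so callers never divide by zero.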
def getting_scalednetoperatingassets(self,statements_kf,seed=-1):
"""
SNOA = (Operating Assets - Operating Liabilities) / Total Assets
where
OA = total assets - cash and equivalents
OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common equity
oa=ttmskfcompwhatever.iloc[-1]['totalassets']-ttmskfcompwhatever.iloc[-1]['cashandequivalengthts']
ol=ttmskfcompwhatever.iloc[-1]['totalassets']-ttmskfcompwhatever.iloc[-1]['netdebt']-ttmskfcompwhatever.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmskfcompwhatever.iloc[-1]['totalassets']
"""
totalassets=self.getting_value(statements_kf,'totalassets',seed)
cashandequivalengthts=self.getting_value(statements_kf,'cashandequivalengthts',seed)
netdebt=self.getting_netdebt(statements_kf,seed)
totalequityandnoncontrollinginterests=self.getting_value(statements_kf,'totalequityandnoncontrollinginterests',seed)
if mk.ifnull(totalassets) or totalassets==0:
return None
s=mk.Collections([totalassets,cashandequivalengthts])
m=mk.Collections([1,-1])
oa=s.multiply(m).total_sum()
s=mk.Collections([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=mk.Collections([1,-1,-1])
ol=s.multiply(m).total_sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
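# Toy check of the docstring formula (hypothetical values): totalassets=1000,
# cashandequivalengthts=100, netdebt=200, totalequityandnoncontrollinginterests=500
# give OA = 1000 - 100 = 900 and OL = 1000 - 200 - 500 = 300, so
# SNOA = (900 - 300) / 1000 = 0.6.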
def getting_scaledtotalaccruals(self,statements_kf,seed=-1,lengthgth=4):
netincome=self.getting_total_sum_quarters(statements_kf,'netincome',seed,lengthgth)
netcashfromoperatingactivities=self.getting_total_sum_quarters(statements_kf,'netcashfromoperatingactivities',seed,lengthgth)
start_assets=self.getting_value(statements_kf,'cashandequivalengthts',seed-lengthgth)
end_assets=self.getting_value(statements_kf,'cashandequivalengthts',seed)
if mk.ifnull([start_assets,end_assets]).whatever():
return None
totalassets=np.average([start_assets,end_assets])
if mk.ifnull(totalassets):
return None
num=mk.Collections([netincome,netcashfromoperatingactivities])
if mk.ifnull(num).total_all():
return None
m=mk.Collections([1,-1])
num=num.multiply(m).total_sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def getting_grossmargin(self,statements_kf,seed=-1,lengthgth=4):
totalrevenue=self.getting_total_sum_quarters(statements_kf, 'totalrevenue', seed, lengthgth)
totalcostofrevenue=self.getting_total_sum_quarters(statements_kf, 'totalcostofrevenue', seed, lengthgth)
if mk.ifnull([totalrevenue,totalcostofrevenue]).whatever() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def getting_margingrowth(self,statements_kf,seed=-1,lengthgth1=20,lengthgth2=4):
grossmargins=[]
for i in range(seed,seed-lengthgth1,-1):
grossmargins.adding(self.getting_grossmargin(statements_kf, i, lengthgth2))
grossmargins=mk.Collections(grossmargins)
if mk.ifnull(grossmargins).whatever():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[mk.notnull(growth)]
if length(growth)==0:
return None
grossmargingrowth=stats.gaverage(1+growth)-1
if mk.ifnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def getting_marginstability(self,statements_kf,seed=-1,lengthgth1=20,lengthgth2=4):
# lengthgth1 = how far back to go, i.e. how many quarters of history (default 20)
# lengthgth2 = for each quarter, how many trailing quarters to aggregate (default 4)
grossmargins=[]
for i in range(seed,seed-lengthgth1,-1):
grossmargins.adding(self.getting_grossmargin(statements_kf, i, lengthgth2))
grossmargins=mk.Collections(grossmargins)
if mk.ifnull(grossmargins).whatever() or grossmargins.standard()==0:
return None
marginstability=grossmargins.average()/grossmargins.standard()
if mk.ifnull(marginstability):
return None
return float(marginstability)
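# Example of the stability ratio (hypothetical margins): quarterly gross margins
# of [0.40, 0.42, 0.38, 0.40] have average 0.40 and sample standard deviation
# ~0.0163, so marginstability ~= 24.5; a flat margin history scores higher than
# a volatile one, and a zero standard deviation is rejected above to avoid
# dividing by zero.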
def getting_cacl(self,kf,seed=-1):
a=self.getting_value(kf,'totalcurrentassets',seed)
l=self.getting_value(kf,'totalcurrentliabilities',seed)
if mk.ifnull([a,l]).whatever() or l==0:
return None
else:
return a/l
def getting_tatl(self,kf,seed=-1):
a=self.getting_value(kf,'totalassets',seed)
l=self.getting_value(kf,'tottotal_alliabilities',seed)
if mk.ifnull([a,l]).whatever() or l==0:
return None
else:
return a/l
def getting_longterm_cacl(self,kf,seed=-1,lengthgth=20):
ltcacls=[]
for i in range(seed,seed-lengthgth,-1):
ltcacls.adding(self.getting_cacl(kf,i))
ltcacls= | mk.Collections(ltcacls) | pandas.Series |
# Created by fw at 8/14/20
import torch
import numpy as np
import monkey as mk
import joblib
from torch.utils.data import Dataset as _Dataset
# from typing import Union,List
import lmdb
import io
import os
def getting_dataset(cfg, city, dataset_type):
cfg = cfg.DATASET
assert city.upper() in ["BERLIN", "ISTANBUL", "MOSCOW", "ALL"], "wrong city"
Dataset: object = globals()[cfg.NAME]
if city.upper() == "ALL":
d = []
for c in ["BERLIN", "ISTANBUL", "MOSCOW"]:
d.adding(Dataset(cfg, c, dataset_type))
dataset = torch.utils.data.ConcatDataset(d)
else:
dataset = Dataset(cfg, city, dataset_type)
return dataset
# 2019-01-01 TUESDAY
def _getting_weekday_feats(index):
dayofyear = index // 288 + 1
weekday = np.zeros([7, 495, 436], dtype=np.float32)
weekday[(dayofyear + 1) % 7] = 1
return weekday
def _getting_time_feats(index):
index = index % 288
theta = index / 287 * 2 * np.pi
time = np.zeros([2, 495, 436], dtype=np.float32)
time[0] = np.cos(theta)
time[1] = np.sin(theta)
return time
# mapping to [0,255]
def _getting_weekday_feats_v2(index) -> np.array:
dayofyear = index // 288 + 1
weekday = np.zeros([7, 495, 436], dtype=np.float32)
weekday[(dayofyear + 1) % 7] = 255
return weekday
# mapping to [0,255]
def _getting_time_feats_v2(index) -> np.array:
index = index % 288
theta = index / 287 * 2 * np.pi
time = np.zeros([2, 495, 436], dtype=np.float32)
time[0] = (np.cos(theta) + 1) / 2 * 255
time[1] = (np.sin(theta) + 1) / 2 * 255
return time
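# Illustrative helper (not part of the original loaders, name is an assumption):
# the two v2 feature builders above are consumed together, exactly as
# PretrainDataset.__gettingitem__ stacks them below, giving 2 cyclical time-of-day
# planes plus 7 one-hot weekday planes with shape (9, 495, 436).
def _getting_extra_feats_v2(index):
    return np.concatingenate([_getting_time_feats_v2(index), _getting_weekday_feats_v2(index)])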
class PretrainDataset(_Dataset):
def __init__(self, cfg, city="berlin", dataset_type="train"):
self.city = city.upper()
self.cfg = cfg
self.dataset_type = dataset_type
self.sample_by_num = self._sample_by_num(dataset_type)
self.env = None
self.transform_env = None
# TODO
def __length__(self):
return length(self.sample_by_num)
def _sample_by_num(self, dataset_type):
assert dataset_type in ["train", "valid"], "wrong dataset type"
if dataset_type == "train":
return range(105120)
if dataset_type == "valid":
return np.random.choice(range(105120), 1024)
# TODO
def __gettingitem__(self, idx):
if self.env is None:
self.env = lmdb.open(
os.path.join(self.cfg.DATA_PATH, self.city), readonly=True
)
# print(idx)
start_idx = self.sample_by_num[idx]
x = [self._getting_item(start_idx + i) for i in range(12)]
x = np.concatingenate(x)
y = [self._getting_item(start_idx + i) for i in [12, 13, 14, 17, 20, 23]]
y = np.concatingenate(y)
extra = np.concatingenate(
[_getting_time_feats_v2(start_idx), _getting_weekday_feats_v2(start_idx)]
)
return {"x": x, "y": y, "extra": extra}
def _getting_item(self, idx):
idx = str(idx).encode("ascii")
try:
with self.env.begin() as txn:
data = txn.getting(idx)
data = np.load(io.BytesIO(data))
x = np.zeros(495 * 436 * 3, dtype=np.uint8)
x[data["x"]] = data["y"]
x = x.reshape([495, 436, 3])
x = np.moveaxis(x, -1, 0)
except:
x = np.zeros([3, 495, 436], dtype=np.uint8)
return x
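# Storage note (inferred from the decoder above, not documented in the original):
# each LMDB record seems to hold a flattened sparse frame, where data['x'] lists
# the non-zero positions inside the 495*436*3 vector and data['y'] the matching
# uint8 values; _getting_item scatters them back into a dense (3, 495, 436) array
# and silently falls back to an all-zero frame when the key is missing or corrupt.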
class BaseDataset(_Dataset):
def __init__(self, cfg, city="berlin", dataset_type="train"):
self.city = city.upper()
self.cfg = cfg
self.dataset_type = dataset_type
self.sample_by_num = self._sample_by_num(dataset_type)
self.env = None
self.transform_env = None
# TODO
def __length__(self):
return length(self.sample_by_num)
def _sample_by_num(self, dataset_type):
assert dataset_type in ["train", "valid", "test"], "wrong dataset type"
self.valid_index = np.load(self.cfg.VALID_INDEX)["index"]
self.test_index = np.load(self.cfg.TEST_INDEX)["index"]
self.valid_and_text_index = np.adding(self.test_index, self.valid_index)
self.valid_and_text_index.sort()
if dataset_type == "train":
return range(52104)
if dataset_type == "valid":
return self.valid_index
if dataset_type == "test":
return self.test_index
# TODO
def __gettingitem__(self, idx):
if self.env is None:
self.env = lmdb.open(
os.path.join(self.cfg.DATA_PATH, self.city), readonly=True
)
# print(idx)
start_idx = self.sample_by_num[idx]
x = [self._getting_item(start_idx + i) for i in range(12)]
x = np.concatingenate(x)
if self.dataset_type != "test":
y = [self._getting_item(start_idx + i)[:-1] for i in [12, 13, 14, 17, 20, 23]]
y = np.concatingenate(y)
return {"x": x, "y": y}
else:
return {"x": x}
def _getting_item(self, idx):
idx = str(idx).encode("ascii")
try:
with self.env.begin() as txn:
data = txn.getting(idx)
data = np.load(io.BytesIO(data))
x = np.zeros(495 * 436 * 9, dtype=np.uint8)
x[data["x"]] = data["y"]
x = x.reshape([495, 436, 9])
x = np.moveaxis(x, -1, 0)
except:
x = np.zeros([9, 495, 436], dtype=np.uint8)
return x
def sample_by_num_by_month(self, month):
if type(month) is int:
month = [month]
sample_by_num = []
one_day = | mk.convert_datetime("2019-01-02") | pandas.to_datetime |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
import monkey as mk
import geomonkey as gmk
import numpy as np
# for debugging purposes
import json
external_stylesheets = ['stylesheet.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
h_getting_max = 550
margin_val = 30
kf = mk.read_csv("data/data.csv")
feature_names = kf.sip(['neighborhood code','neighborhood name',
'district name'], axis=1).header_num()
# relative path; ensure the directory containing this script includes the data subdirectory
data_path = "data/barris.geojson"
gkf = gmk.read_file(data_path)
gkf.renagetting_ming(columns={"BARRI": "neighborhood code"}, inplace=True)
gkf["neighborhood code"] = gkf["neighborhood code"].employ(int)
gkf["nbd code"] = gkf["neighborhood code"]
kf_unionerd = | mk.unioner(gkf, kf, on="neighborhood code") | pandas.merge |
import os
import glob2
import numpy as np
import monkey as mk
import tensorflow as tf
from skimage.io import imread
# /datasets/faces_emore_112x112_folders/*/*.jpg'
default_image_names_reg = "*/*.jpg"
default_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path)))
def pre_process_folder(data_path, image_names_reg=None, image_classes_rule=None):
while data_path.endswith("/"):
data_path = data_path[:-1]
if not data_path.endswith(".npz"):
dest_pickle = os.path.join("./", os.path.basename(data_path) + "_shuffle.npz")
else:
dest_pickle = data_path
if os.path.exists(dest_pickle):
aa = np.load(dest_pickle)
if length(aa.keys()) == 2:
image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], []
else:
# dataset with embedding values
image_names, image_classes, embeddings = aa["image_names"], aa["image_classes"], aa["embeddings"]
print(">>>> reloaded from dataset backup:", dest_pickle)
else:
if not os.path.exists(data_path):
return [], [], [], 0, None
if image_names_reg is None or image_classes_rule is None:
image_names_reg, image_classes_rule = default_image_names_reg, default_image_classes_rule
image_names = glob2.glob(os.path.join(data_path, image_names_reg))
image_names = np.random.permutation(image_names).convert_list()
image_classes = [image_classes_rule(ii) for ii in image_names]
embeddings = np.array([])
np.savez_compressed(dest_pickle, image_names=image_names, image_classes=image_classes)
classes = np.getting_max(image_classes) + 1
return image_names, image_classes, embeddings, classes, dest_pickle
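# Usage sketch (hypothetical folder, mirrors the default comment above): the first
# call globs and shuffles the image list, then caches it in the working directory
# as '<folder>_shuffle.npz'; later calls, or passing the .npz path itself, reload
# that backup instead of re-globbing:
# image_names, image_classes, _, classes, dest_pickle = pre_process_folder(
#     "/datasets/faces_emore_112x112_folders")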
def tf_imread(file_path):
# tf.print('Reading file:', file_path)
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img, channels=3) # [0, 255]
img = tf.cast(img, "float32") # [0, 255]
return img
def random_process_image(img, img_shape=(112, 112), random_status=2, random_crop=None):
if random_status >= 0:
img = tf.image.random_flip_left_right(img)
if random_status >= 1:
# 25.5 == 255 * 0.1
img = tf.image.random_brightness(img, 25.5 * random_status)
if random_status >= 2:
img = tf.image.random_contrast(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)
img = tf.image.random_saturation(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)
if random_status >= 3 and random_crop is not None:
img = tf.image.random_crop(img, random_crop)
img = tf.image.resize(img, img_shape)
if random_status >= 1:
img = tf.clip_by_value(img, 0.0, 255.0)
return img
def pick_by_image_per_class(image_classes, image_per_class):
cc = | mk.counts_value_num(image_classes) | pandas.value_counts |