import math
import threading
from django.core.cache import caches
from .settings import CACHE_HELPERS_ALIAS
CACHE_HELPERS_KEY = 'cache_helpers_key'
def set_cache_bust_status(bust_key=None):
cache = caches[CACHE_HELPERS_ALIAS]
cache.set(CACHE_HELPERS_KEY, bust_key)
def get_bust_key():
cache = caches[CACHE_HELPERS_ALIAS]
return cache.get(CACHE_HELPERS_KEY, None)
def mark_response_as_processed(response):
    setattr(response, '_already_cached', True)
def check_response_has_been_processed(response):
    return getattr(response, '_already_cached', False)
def check_bust_header(request):
bust_key = request.META.get('HTTP_BUST', '')
    return bool(bust_key) and bust_key == get_bust_key()
# TODO: avoid building a list; use a generator instead
def threaded_cue(cue, callback, threads):
def process_chunk(begining, end, worker_num):
for index, item in enumerate(cue[begining:end]):
real_index = (begining + index) if begining > 0 else index
result = callback(item)
if result:
cue[real_index] = result
CHUNK_SIZE = math.ceil(len(cue) / threads)
end = 0
threads_refs = []
for i in range(threads):
begining = end
end = begining + CHUNK_SIZE
t = threading.Thread(target=process_chunk, args=(begining, end if end < len(cue) else len(cue), i))
t.start()
threads_refs.append(t)
    for t in threads_refs:
        t.join()
return cue
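# Hedged usage sketch (not part of the original module): threaded_cue splits
# the list into `threads` chunks and runs `callback` on every item, writing
# truthy results back in place. The names below are illustrative only.
def _example_threaded_cue():
    data = list(range(10))
    # every item is doubled in place by the worker threads
    return threaded_cue(data, lambda item: item * 2, 4)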
def get_ref_from_func(func):
if hasattr(func, '__self__'):
return func.__self__.__class__
return func
def get_func_from_func(func):
if hasattr(func, '__wrapped__'):
return func.__wrapped__
return func
def func_to_string(func):
func = func.func if hasattr(func, 'func') else func
ref = get_ref_from_func(func)
chunks = [
ref.__module__,
ref.__name__,
]
func = get_func_from_func(func)
if func.__name__ != chunks[-1]:
chunks.append(func.__name__)
return '.'.join(chunks)
def invalidate_cache(cache_key, cache=None):
cache = caches[cache if cache is not None else CACHE_HELPERS_ALIAS]
cache.delete(cache_key)
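# Hedged usage sketch (not part of the original module): a deploy hook could
# rotate the bust key and a view could honour an incoming "Bust" header.
# The key and cache-key values below are hypothetical.
def _example_cache_bust_flow(request):
    set_cache_bust_status(bust_key='deploy-42')
    if check_bust_header(request):
        invalidate_cache('my_view_cache_key')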
"""
Provides the functionality to feed TF templates with Jerakia lookups
"""
import sys
import os
from jerakia import Jerakia
from terraform_external_data import terraform_external_data
def retrieveLookupInfo(query,item):
lookitem = query[item]
    lookuppath = lookitem.split('/')
key = lookuppath.pop()
namespace = lookuppath
if not namespace:
raise Exception("No namespace given %s" % item )
return namespace,key
@terraform_external_data
def lookupJerakia(query,variables=None):
jerakia = Jerakia(configfile=os.path.abspath('utils/jerakia.yaml'))
resdict = {}
for item in query:
namespace,key = retrieveLookupInfo(query,item)
ret = []
response = jerakia.lookup(key=key, namespace=namespace, variables=variables)
ret.append(response['payload'])
resdict.update({item: str(ret)})
return resdict
if __name__ == '__main__':
    lookupJerakia()
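# Hedged note (added): based on retrieveLookupInfo, each query value is a
# '/'-separated lookup path whose last segment is the key and whose leading
# segments form the namespace, e.g. (hypothetical names):
#   query = {'db_password': 'common/database/password'}
#   -> namespace == ['common', 'database'], key == 'password'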
"""Internal helpers for dataset validation."""
from pathlib import Path
from typing import Any, Iterable, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from biopsykit.utils._types import _Hashable, path_t
from biopsykit.utils.exceptions import FileExtensionError, ValidationError, ValueRangeError
def _assert_is_dir(path: path_t, raise_exception: Optional[bool] = True) -> Optional[bool]:
"""Check if a path is a directory.
Parameters
----------
path : path or str
path to check if it's a directory
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``path`` is a directory, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
ValueError
if ``raise_exception`` is ``True`` and ``path`` is not a directory
"""
# ensure pathlib
file_name = Path(path)
if not file_name.is_dir():
if raise_exception:
raise ValueError("The path '{}' is expected to be a directory, but it's not!".format(path))
return False
return True
def _assert_file_extension(
file_name: path_t, expected_extension: Union[str, Sequence[str]], raise_exception: Optional[bool] = True
) -> Optional[bool]:
"""Check if a file has the correct file extension.
Parameters
----------
file_name : path or str
file name to check for correct extension
expected_extension : str or list of str
file extension (or a list of file extensions) to check for
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``file_name`` ends with one of the specified file extensions, ``False`` otherwise
(if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.FileExtensionError`
if ``raise_exception`` is ``True`` and ``file_name`` does not end with any of the specified
``expected_extension``
"""
# ensure pathlib
file_name = Path(file_name)
if isinstance(expected_extension, str):
expected_extension = [expected_extension]
if file_name.suffix not in expected_extension:
if raise_exception:
raise FileExtensionError(
"The file name extension is expected to be one of {}. "
"Instead it has the following extension: {}".format(expected_extension, file_name.suffix)
)
return False
return True
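# Hedged usage sketch (not part of the original module):
#   _assert_file_extension(Path("data.csv"), [".csv", ".xlsx"])        # True
#   _assert_file_extension("data.txt", ".csv", raise_exception=False)  # False
#   _assert_file_extension("data.txt", ".csv")                         # raises FileExtensionError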
def _assert_is_dtype(
obj, dtype: Union[type, Tuple[type, ...]], raise_exception: Optional[bool] = True
) -> Optional[bool]:
"""Check if an object has a specific data type.
Parameters
----------
obj : any object
object to check
dtype : type or list of type
data type of tuple of data types to check
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``obj`` is one of the expected data types, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
if ``raise_exception`` is ``True`` and ``obj`` is none of the expected data types
"""
if not isinstance(obj, dtype):
if raise_exception:
raise ValidationError(
"The data object is expected to be one of ({},). But it is a {}".format(dtype, type(obj))
)
return False
return True
def _assert_has_multiindex(
df: pd.DataFrame,
expected: Optional[bool] = True,
nlevels: Optional[int] = 2,
    nlevels_atleast: Optional[bool] = False,
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if a :any:`pandas.DataFrame` has a :any:`pandas.MultiIndex` as index.
Parameters
----------
df : :class:`~pandas.DataFrame`
The dataframe to check
expected : bool, optional
Whether the df is expected to have a MultiIndex index or not
nlevels : int, optional
If MultiIndex is expected, how many levels the MultiIndex index should have
nlevels_atleast : bool, optional
Whether the MultiIndex has to have at least ``nlevels`` (``True``)
or exactly match the number of levels (``False``)
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``df`` meets the expected index format, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
if ``raise_exception`` is ``True`` and ``df`` does not meet the expected index format
"""
return _multiindex_check_helper(
df=df,
idx_or_col="index",
expected=expected,
nlevels=nlevels,
nlevels_atleast=nlevels_atleast,
raise_exception=raise_exception,
)
def _assert_has_index_levels(
df: pd.DataFrame,
index_levels: Iterable[_Hashable],
match_atleast: Optional[bool] = False,
match_order: Optional[bool] = False,
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if the dataframe has all index level names.
Parameters
----------
df : :class:`~pandas.DataFrame`
The dataframe to check
index_levels : list
Set of index level names to check
match_atleast : bool, optional
Whether the MultiIndex columns have to have at least the specified column levels (``True``)
or exactly match the column levels (``False``)
match_order : bool, optional
Whether to also match the level order
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``df`` has the expected index level names, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
if ``raise_exception`` is ``True`` and ``df`` does not have the expected index level names
"""
return _multiindex_level_names_helper(
df,
level_names=index_levels,
idx_or_col="index",
match_atleast=match_atleast,
match_order=match_order,
raise_exception=raise_exception,
)
def _assert_has_columns(
df: pd.DataFrame,
columns_sets: Sequence[Union[List[_Hashable], List[str], pd.Index]],
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if the dataframe has at least all columns sets.
Parameters
----------
df : :class:`~pandas.DataFrame`
The dataframe to check
columns_sets : list
Column set or list of column sets to check
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``df`` has the expected column names, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
        if ``raise_exception`` is ``True`` and ``df`` does not have the expected columns
Examples
--------
>>> df = pd.DataFrame()
>>> df.columns = ["col1", "col2"]
>>> _assert_has_columns(df, [["other_col1", "other_col2"], ["col1", "col2"]])
>>> # This raises no error, as df contains all columns of the second set
"""
columns = df.columns
result = False
for col_set in columns_sets:
result = result or all(v in columns for v in col_set)
if result is False:
if len(columns_sets) == 1:
helper_str = "the following columns: {}".format(columns_sets[0])
else:
helper_str = "one of the following sets of columns: {}".format(columns_sets)
if raise_exception:
raise ValidationError(
"The dataframe is expected to have {}. Instead it has the following columns: {}".format(
helper_str, list(df.columns)
)
)
return result
def _assert_has_column_multiindex(
df: pd.DataFrame,
expected: Optional[bool] = True,
nlevels: Optional[int] = 2,
    nlevels_atleast: Optional[bool] = False,
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if a :any:`pandas.DataFrame` has a :any:`pandas.MultiIndex` as columns.
Parameters
----------
df : :class:`~pandas.DataFrame`
The dataframe to check
expected : bool, optional
Whether the df is expected to have MultiIndex column or not
nlevels : int, optional
If MultiIndex is expected, how many levels the MultiIndex columns should have
nlevels_atleast : bool, optional
Whether the MultiIndex has to have at least ``nlevels`` (``True``)
or exactly match the number of levels (``False``)
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if ``df`` meets the expected column index format, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
        if ``raise_exception`` is ``True`` and ``df`` does not meet the expected column index format
"""
return _multiindex_check_helper(
df=df,
idx_or_col="column",
expected=expected,
nlevels=nlevels,
nlevels_atleast=nlevels_atleast,
raise_exception=raise_exception,
)
def _assert_has_columns_any_level(
df: pd.DataFrame,
columns_sets: Sequence[Union[List[_Hashable], List[str], pd.Index]],
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if the dataframe has the expected set of column names at any level of a :any:`pandas.MultiIndex`.
Parameters
----------
df : :class:`~pandas.DataFrame`
The dataframe to check
columns_sets : list
        Column set or list of column sets to check
raise_exception : bool, optional
whether to raise an exception or return a bool value
Returns
-------
``True`` if ``df`` has the expected column names at any :any:`pandas.MultiIndex` level,
``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
if ``raise_exception`` is ``True`` and ``df`` does not have the expected column names
Examples
--------
>>> df = pd.DataFrame()
>>> df.columns = pd.MultiIndex.from_tuples([("Name", "col1"), ("Name", "col2")])
>>> _assert_has_columns_any_level(df, [["col1", "col2"]])
    >>> # This raises no error, as df contains all columns in the second level
"""
_assert_has_column_multiindex(df, expected=True, nlevels_atleast=True)
column_levels = [np.array(df.columns.get_level_values(i)) for i in range(df.columns.nlevels)]
result = False
for columns in column_levels:
for col_set in columns_sets:
result = result or all(v in columns for v in col_set)
if result is False:
if len(columns_sets) == 1:
helper_str = "the following columns: {}".format(columns_sets[0])
else:
helper_str = "one of the following sets of columns: {}".format(columns_sets)
if raise_exception:
raise ValidationError(
"The dataframe is expected to have {} at any level of the MultiIndex. Instead it has the "
"following MultiIndex columns: {}".format(helper_str, column_levels)
)
return result
def _assert_has_column_levels(
df: pd.DataFrame,
column_levels: Iterable[_Hashable],
match_atleast: Optional[bool] = False,
match_order: Optional[bool] = False,
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if the dataframe has all column level names of a MultiIndex column.
Parameters
----------
df : :class:`~pandas.DataFrame`
The dataframe to check
column_levels : list
Set of column level names to check
match_atleast : bool, optional
Whether the MultiIndex columns have to have at least the specified column levels (``True``)
or exactly match the column levels (``False``)
match_order : bool, optional
Whether to also match the level order
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if ``df`` has the expected column level names, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
        if ``raise_exception`` is ``True`` and ``df`` does not have the expected column level names
"""
return _multiindex_level_names_helper(
df,
level_names=column_levels,
idx_or_col="column",
match_atleast=match_atleast,
match_order=match_order,
raise_exception=raise_exception,
)
def _assert_value_range(
data: Union[pd.DataFrame, pd.Series],
value_range: Sequence[Union[int, float]],
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
"""Check if all values are within the specified range.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to check values
value_range : tuple of numbers
value range in the format [min_val, max_val]
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if all values in ``data`` are within ``value_range``, ``False`` otherwise
(if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValueRangeError`
if ``raise_exception`` is ``True`` and any value of ``data`` is not within ``value_range``
"""
max_val = np.nanmax(data)
min_val = np.nanmin(data)
if not (min_val >= value_range[0] and max_val <= value_range[1]):
if raise_exception:
raise ValueRangeError(
"Some of the values are out of the expected range. "
"Expected were values in the range {}, got values in the range {}. "
"If values are part of questionnaire scores, "
"you can convert questionnaire items into the correct range by calling "
"`biopsykit.questionnaire.utils.convert_scale()`.".format(value_range, [min_val, max_val])
)
return False
return True
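# Hedged usage sketch (not part of the original module): items scored 1-5
# pass a [1, 5] range check, anything outside raises ValueRangeError.
#   df = pd.DataFrame({"item": [1, 2, 5]})
#   _assert_value_range(df, [1, 5])                         # True
#   _assert_value_range(df, [0, 4], raise_exception=False)  # False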
def _assert_num_columns(
data: pd.DataFrame, num_cols: Union[int, Sequence[int]], raise_exception: Optional[bool] = True
) -> Optional[bool]:
"""Check if dataframe has (any of) the required number of columns.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to check
num_cols : int or list of int
the required number of columns (or any of the required number of columns in case ``num_cols`` is a list)
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if ``data`` has the required number of columns, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
if ``raise_exception`` is ``True`` and ``data`` does not have the required number of columns
"""
if isinstance(num_cols, int):
num_cols = [num_cols]
if not any(len(data.columns) == num for num in num_cols):
if raise_exception:
raise ValidationError(
"The dataframe does not have the required number of columns. "
"Expected were any of {} columns, but has {} columns.".format(num_cols, len(data.columns))
)
return False
return True
def _assert_len_list(data: Sequence, length: int, raise_exception: Optional[bool] = True) -> Optional[bool]:
"""Check if a list has the required length.
Parameters
----------
data : list
list to check
length : int
        the required length of the list
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if ``data`` has the required length, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
if ``raise_exception`` is ``True`` and ``data`` does not have the required length
"""
_assert_is_dtype(data, (list, tuple, np.ndarray))
if len(data) != length:
if raise_exception:
raise ValidationError(
"The list does not have the required length. "
"Expected was length {}, but it has length {}.".format(length, len(data))
)
return False
return True
def _assert_dataframes_same_length(
df_list: Sequence[pd.DataFrame], raise_exception: Optional[bool] = True
) -> Optional[bool]:
"""Check if all dataframes have same length.
Parameters
----------
df_list : list
list of dataframes to check
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if all dataframes in ``df_list`` have same length, ``False`` otherwise
(if ``raise_exception`` is ``False``)
Raises
------
:exc:`~biopsykit.exceptions.ValidationError`
        if ``raise_exception`` is ``True`` and not all dataframes have the same length
"""
if len(set(len(df) for df in df_list)) != 1:
if raise_exception:
raise ValidationError("Not all dataframes have the same length!")
return False
return True
def _multiindex_level_names_helper_get_expected_levels(
ac_levels: Sequence[str],
ex_levels: Sequence[str],
match_atleast: Optional[bool] = False,
match_order: Optional[bool] = False,
) -> bool:
if match_order:
if match_atleast:
ac_levels_slice = ac_levels[: len(ex_levels)]
expected = ex_levels == ac_levels_slice
else:
expected = ex_levels == ac_levels
else:
if match_atleast:
expected = all(level in ac_levels for level in ex_levels)
else:
expected = sorted(ex_levels) == sorted(ac_levels)
return expected
def _multiindex_level_names_helper(
df: pd.DataFrame,
level_names: Iterable[_Hashable],
idx_or_col: str,
match_atleast: Optional[bool] = False,
match_order: Optional[bool] = False,
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
if isinstance(level_names, str):
level_names = [level_names]
ex_levels = list(level_names)
if idx_or_col == "index":
ac_levels = list(df.index.names)
else:
ac_levels = list(df.columns.names)
expected = _multiindex_level_names_helper_get_expected_levels(ac_levels, ex_levels, match_atleast, match_order)
if not expected:
if raise_exception:
raise ValidationError(
"The dataframe is expected to have exactly the following {} level names {}, "
"but it has {}".format(idx_or_col, level_names, ac_levels)
)
return False
return True
def _multiindex_check_helper(
df: pd.DataFrame,
idx_or_col: str,
expected: Optional[bool] = True,
nlevels: Optional[int] = 2,
    nlevels_atleast: Optional[bool] = False,
raise_exception: Optional[bool] = True,
) -> Optional[bool]:
has_multiindex, nlevels_act = _multiindex_check_helper_get_levels(df, idx_or_col)
if has_multiindex is not expected:
return _multiindex_check_helper_not_expected(idx_or_col, nlevels, nlevels_act, expected, raise_exception)
if has_multiindex is True:
if nlevels_atleast:
expected = nlevels_act >= nlevels
else:
expected = nlevels_act == nlevels
if not expected:
if raise_exception:
raise ValidationError(
"The dataframe is expected to have a MultiIndex with {0} {1} levels. "
"But it has a MultiIndex with {2} {1} levels.".format(nlevels, idx_or_col, nlevels_act)
)
return False
return True
def _multiindex_check_helper_get_levels(df: pd.DataFrame, idx_or_col: str) -> Tuple[bool, int]:
if idx_or_col == "index":
has_multiindex = isinstance(df.index, pd.MultiIndex)
nlevels_act = df.index.nlevels
else:
has_multiindex = isinstance(df.columns, pd.MultiIndex)
nlevels_act = df.columns.nlevels
return has_multiindex, nlevels_act
def _multiindex_check_helper_not_expected(
idx_or_col: str, nlevels: int, nlevels_act: int, expected: bool, raise_exception: bool
) -> Optional[bool]:
if not expected:
if raise_exception:
raise ValidationError(
"The dataframe is expected to have a single level as {0}. "
"But it has a MultiIndex with {1} {0} levels.".format(idx_or_col, nlevels_act)
)
return False
if raise_exception:
raise ValidationError(
"The dataframe is expected to have a MultiIndex with {0} {1} levels. "
"It has just a single normal {1} level.".format(nlevels, idx_or_col)
)
return False
def _assert_has_column_prefix(
columns: Sequence[str], prefix: str, raise_exception: Optional[bool] = True
) -> Optional[bool]:
"""Check whether all columns start with the same prefix.
Parameters
----------
columns : list of str
list of column names
prefix : str
expected prefix of all columns
raise_exception : bool, optional
Whether to raise an exception or return a bool value
Returns
-------
``True`` if ``columns`` all start with ``prefix``, ``False`` otherwise (if ``raise_exception`` is ``False``)
Raises
------
ValidationError
if ``raise_exception`` is ``True`` and one of ``columns`` is not a string or does not start with ``prefix``
"""
if prefix is None or len(prefix) == 0:
if raise_exception:
raise ValidationError("'prefix' is None or empty!")
return False
    for col in columns:
        if not _check_has_column_prefix_single_col(columns, col, prefix, raise_exception):
            return False
    return True
def _check_has_column_prefix_single_col(
columns: Sequence[str], col: Any, prefix: str, raise_exception: bool
) -> Optional[bool]:
if not _assert_is_dtype(col, str, raise_exception=False):
if raise_exception:
raise ValidationError("Column '{}' from {} is not a string!".format(col, columns))
return False
if not col.startswith(prefix):
if raise_exception:
raise ValidationError(
"Column '{}' from {} are starting with the required prefix '{}'!".format(col, columns, prefix)
)
return False
return True
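# Hedged usage sketch (not part of the original module); column names are
# illustrative only:
#   _assert_has_column_prefix(["PSS_1", "PSS_2"], "PSS")                         # True
#   _assert_has_column_prefix(["PSS_1", "other"], "PSS", raise_exception=False)  # False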
from terminaltables import SingleTable
import requests
import os
from dotenv import load_dotenv
def predict_salary(min_salary, max_salary):
    if not min_salary:
        average_salary = max_salary * 0.8
    elif not max_salary:
        average_salary = min_salary * 1.2
    else:
        average_salary = (max_salary + min_salary) / 2
return average_salary
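# Hedged worked examples (not part of the original script): with only a lower
# bound the estimate is min*1.2, with only an upper bound it is max*0.8,
# otherwise the arithmetic mean of both bounds.
#   predict_salary(100000, 0)      -> 120000.0
#   predict_salary(None, 100000)   -> 80000.0
#   predict_salary(80000, 120000)  -> 100000.0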
def get_vacancies_hh(profession):
hh_vacancies = []
page = 0
pages = 1
while page < pages:
url = 'https://api.hh.ru/vacancies'
user_request = {'text': profession, 'area': '4', 'period': '30',
'per_page': '10', 'page': page}
page_response = requests.get(url, params=user_request)
pages = page_response.json()['pages']
page += 1
page_answer_hh = page_response.json()
hh_vacancies.append(page_answer_hh)
return hh_vacancies
def predict_rub_salary_hh(hh_vacancies, profession):
total_vacancies = hh_vacancies[0]['found']
total_salary = 0
total_number = 0
for vacancy in hh_vacancies:
prepare_vacancies = vacancy['items']
number = 0
sum_salary = 0
total_average_salary = 0
for prepare_vacancy in prepare_vacancies:
if prepare_vacancy['salary'] is not None:
salary = prepare_vacancy['salary']
if salary['currency'] == 'RUR':
number += 1
min_salary = salary['from']
max_salary = salary['to']
average_salary = predict_salary(min_salary, max_salary)
sum_salary += average_salary
total_salary += sum_salary
total_number += number
try:
total_average_salary = int(total_salary/total_number)
except ZeroDivisionError:
pass
hh_response = [profession, total_vacancies, total_number, total_average_salary]
return hh_response
def get_vacancies_sj(profession, secret_key_sj):
sj_vacancies = []
page = 0
pages = 1
while page < pages:
url = 'https://api.superjob.ru/2.0/vacancies/'
headers = {'X-Api-App-Id': secret_key_sj}
user_request = {'keyword': profession,
'town': 4,
'period': 30,
'count': 10,
'page': page}
page_response = requests.get(url, headers=headers, params=user_request)
page_response.raise_for_status()
more_vacancies = page_response.json()['more']
if more_vacancies:
page += 1
pages += 1
if not more_vacancies:
break
page_answer_sj = page_response.json()
sj_vacancies.append(page_answer_sj)
return sj_vacancies
def predict_rub_salary_sj(sj_vacancies, profession):
total_vacancies = sj_vacancies[0]['total']
total_salary = 0
total_number = 0
for vacancy in sj_vacancies:
prepare_vacancies = vacancy['objects']
number = 0
sum_salary = 0
total_average_salary = 0
for prepare_vacancy in prepare_vacancies:
if prepare_vacancy['currency'] == 'rub':
min_salary = prepare_vacancy['payment_from']
max_salary = prepare_vacancy['payment_to']
                if min_salary or max_salary:
number += 1
average_salary = predict_salary(min_salary, max_salary)
sum_salary += average_salary
total_salary += sum_salary
total_number += number
try:
total_average_salary = int(total_salary/total_number)
except ZeroDivisionError:
pass
sj_response = [profession, total_vacancies, total_number, total_average_salary]
return sj_response
def get_table(table, title):
    table_template = [['Programming language', 'Vacancies found',
                       'Vacancies processed', 'Average salary'], ]
for line in table:
table_template.append(line)
table_instance = SingleTable(table_template, title)
table_instance.justify_columns[2] = 'right'
table_result = table_instance.table
return table_result
def main():
load_dotenv()
secret_key_sj = os.getenv('SECRET_KEY')
table_hh = []
table_sj = []
professions = ("C#", "Objective-C",
"Ruby", "Java", "C",
"Typescript", "Scala",
"Go", "Swift",
"C++", "PHP",
"JavaScript", "Python")
for profession in professions:
hh_vacancies = get_vacancies_hh(profession)
hh_response = predict_rub_salary_hh(hh_vacancies, profession)
table_hh.append(hh_response)
title_hh = 'HEADHUNTER_MOSCOW'
sj_vacancies = get_vacancies_sj(profession, secret_key_sj)
try:
sj_response = predict_rub_salary_sj(sj_vacancies, profession)
table_sj.append(sj_response)
except (IndexError, ValueError):
pass
title_sj = 'SUPERJOB_MOSCOW'
    print(get_table(table_hh, title_hh))
    print()
    print(get_table(table_sj, title_sj))
    print()
if __name__ == '__main__':
main()
import numpy as np
import matplotlib.pyplot as plt
teilnehmer = int(input("Participants: "))
a = list()
x = np.arange(1, teilnehmer+1)
y = np.zeros(teilnehmer)
a.append(float(input("Geheimnis: ")))
for i in range(teilnehmer-1):
a.append(float(input(f"Koeffizient a{i+1}: ")))
for i in range(teilnehmer):
for j in range(len(x)):
y[i]+=a[j]*x[i]**j
for i in range(teilnehmer):
print(f"Punkt für Teilnehmer {i+1}: x{i+1} = {x[i]}, y{i+1} = {y[i]}")
berechnetes_geheimnis = 0
for i in range(teilnehmer):
lagrange = 1
for j in range(teilnehmer):
if i != j:
print(f"((0 - {x[j]})/({x[i]} - {x[j]})) * ", end = '')
lagrange *= (0 - x[j])/(x[i] - x[j])
print(f"{y[i]} = {lagrange * y[i]}")
berechnetes_geheimnis += lagrange * y[i]
print(f"Berechnetes Geheimnis: {berechnetes_geheimnis}")
p = np.poly1d(np.polyfit(x, y, teilnehmer-1))
x_plot = np.linspace(-2, 6, 100)
_ = plt.plot(x, y, '.', x_plot, p(x_plot), '-')
plt.ylim(0, 20)
plt.show()
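# Note (added): the reconstruction above is Lagrange interpolation evaluated
# at x = 0, where the polynomial a[0] + a[1]*x + ... + a[n-1]*x**(n-1) equals
# the secret a[0]. A minimal self-check sketch (floating-point rounding
# permitting, using the variables defined above):
#   assert abs(berechnetes_geheimnis - a[0]) < 1e-6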
#!/usr/bin/env python
from __future__ import absolute_import
# Charge transfer efficiency by EPER, now as a pipe task!
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import sys
import numpy as np
import argparse
from .MaskedCCD import MaskedCCD
import lsst.geom as lsstGeom
import lsst.afw.math as afwMath
from lsst.eotest.Estimator import Estimator
class SubImage(object):
"""Functor to produce sub-images depending on scan direction."""
def __init__(self, ccd, amp, overscans, task):
geom = ccd.amp_geom
self.ccd = ccd
self.imaging = geom.imaging
self.image = ccd[amp] # This is the masked image for the desired amp.
if task.config.direction == 'p':
self._bbox = self._parallel_box
llc = lsstGeom.Point2I(geom.parallel_overscan.getMinX(),
geom.parallel_overscan.getMinY() + overscans)
urc = geom.parallel_overscan.getCorners()[2]
self._bias_reg = lsstGeom.Box2I(llc, urc)
self.lastpix = self.imaging.getMaxY()
elif task.config.direction == 's':
self._bbox = self._serial_box
llc = lsstGeom.Point2I(geom.serial_overscan.getMinX() + overscans,
geom.serial_overscan.getMinY())
urc = geom.serial_overscan.getCorners()[2]
#
# Omit the last 4 columns to avoid the bright column in the
# last overscan column in the e2v vendor data.
#
urc[0] -= 4
self._bias_reg = lsstGeom.Box2I(llc, urc)
self.lastpix = self.imaging.getMaxX()
else:
task.log.error("Unknown scan direction: " + str(direction))
sys.exit(1)
def bias_est(self, statistic=afwMath.MEAN, gain=1):
subim = self.image.Factory(self.image, self._bias_reg)
bias_estimate = Estimator()
bias_estimate.value = \
gain*afwMath.makeStatistics(subim, statistic).getValue()
num_pix = len(subim.getImage().getArray().flatten())
bias_estimate.error = \
gain*afwMath.makeStatistics(subim, afwMath.STDEV).getValue()/np.sqrt(float(num_pix))
return bias_estimate
def __call__(self, start, end=None):
if end is None:
end = start
my_exp = self.image.Factory(self.image, self._bbox(start, end))
return my_exp
def _parallel_box(self, start, end):
llc = lsstGeom.PointI(self.imaging.getMinX(), start)
urc = lsstGeom.PointI(self.imaging.getMaxX(), end)
return lsstGeom.BoxI(llc, urc)
def _serial_box(self, start, end):
llc = lsstGeom.PointI(start, self.imaging.getMinY())
urc = lsstGeom.PointI(end, self.imaging.getMaxY())
return lsstGeom.BoxI(llc, urc)
class EPERConfig(pexConfig.Config):
"""Configuration for the EPERTask."""
direction = pexConfig.Field("Select either parallel or serial direction",
str, default="p")
verbose = pexConfig.Field("Turn verbosity on", bool, default=True)
cti = pexConfig.Field('Return CTI instead of CTE', bool, default=False)
class EPERTask(pipeBase.Task):
"""Task to calculate either parallel or serial charge transfer
efficiency via EPER."""
ConfigClass = EPERConfig
_DefaultName = "eper"
@pipeBase.timeMethod
def run(self, infilename, nframes, amps, overscans, gains=None,
mask_files=(), linearity_correction=None):
if not infilename:
self.log.error("Please specify an input file path.")
sys.exit(1)
if gains is None:
gains = dict([(amp, 1) for amp in amps])
ccd = MaskedCCD(infilename, mask_files=mask_files,
linearity_correction=linearity_correction)
# iterate through amps
cte = {}
bias_estimates = {}
for amp in amps:
subimage = SubImage(ccd, amp, overscans, self)
lastpix = subimage.lastpix
# find signal in last image vector (i.e., row or column)
last_im = Estimator(subimage(lastpix), ccd.stat_ctrl,
gain=gains[amp], var_wt=nframes)
if self.config.verbose:
self.log.info("Last imaging row/column = " + str(last_im))
# find signal in each overscan vector
overscan_ests = []
for i in range(1, overscans+1):
overscan_ests.append(Estimator(subimage(lastpix+i),
ccd.stat_ctrl, gain=gains[amp],
var_wt=nframes))
if self.config.verbose:
self.log.info("Overscan values = " + str(overscan_ests))
# sum medians of first n overscan rows
summed = sum(overscan_ests)
if self.config.verbose:
self.log.info("summed overscans = " + str(summed))
# Find bias level.
bias_est = subimage.bias_est(gain=gains[amp],
statistic=afwMath.MEAN)
bias_estimates[amp] = bias_est
if self.config.verbose:
self.log.info("bias value = " + str(bias_est))
# signal = last - bias
sig = last_im - bias_est
# trailed = sum(last2) - bias
trailed = summed - overscans*bias_est
# charge loss per transfer = (trailed/signal)/N
chargelosspt = (trailed/sig)/(lastpix + 1.)
if self.config.cti:
cte[amp] = chargelosspt
cte[amp].set_format_str("{0:.5e}")
else:
cte[amp] = 1. - chargelosspt
cte[amp].set_format_str("{0:.16f}")
if self.config.verbose:
if self.config.cti:
self.log.info('cti, amp ' + str(amp) + " = "
+ str(cte[amp]) + '\n')
else:
self.log.info('cte, amp ' + str(amp) + " = "
+ str(cte[amp]) + '\n')
return cte, bias_estimates
if __name__ == '__main__':
#import pdb; pdb.set_trace()
parser = argparse.ArgumentParser(description='Calculate either parallel or serial CTE via EPER.')
parser.add_argument('infilename', help="image file to be used for analysis")
parser.add_argument('-o', '--overscans',
help="number of overscan rows/columns to use", type=int, default=3)
parser.add_argument('-d', '--direction',
help="specify either parallel ('p') or serial ('s') direction", default='p')
parser.add_argument('-a', '--amps', help="amps to be analyzed, separated by a space",
type=int, nargs='+', default=list(range(1, 17)))
parser.add_argument('-v', '--verbose', help="turn verbosity on", action='store_true', default=False)
parser.add_argument('-i', '--cti', help='return CTI (not CTE)',
action='store_true', default=False)
args = parser.parse_args()
task = EPERTask()
task.config.direction = args.direction
task.config.verbose = args.verbose
task.config.cti = args.cti
    task.run(args.infilename, 1, args.amps, args.overscans)  # nframes=1 is an assumption; the CLI does not expose it
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, DateTime
from sqlalchemy.ext.declarative import as_declarative, declared_attr
from sqlalchemy.orm import sessionmaker, scoped_session
from config.config import SQLALCHEMY_DATABASE_URI
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Session = scoped_session(sessionmaker(bind=engine))
@as_declarative()
class Base:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
created_at = Column(DateTime, default=datetime.now)
updated_at = Column(DateTime, default=datetime.now)
@classmethod
def count(cls):
session = Session()
return session.query(cls).count()
from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
N = len(digits)
for i in reversed(range(N)):
digit = digits[i]
if digit == 9:
digits[i] = 0
else:
digits[i] += 1
return digits
digits[0] = 1
digits.append(0)
return digits
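# Hedged usage sketch (not part of the original solution):
#   Solution().plusOne([1, 2, 3])  -> [1, 2, 4]
#   Solution().plusOne([9, 9])     -> [1, 0, 0]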
import timeit
import CoolProp.CoolProp as CP
def time_check(N, h, p, TTSE = False, mode = 'TTSE'):
if TTSE:
if mode =='TTSE':
setup = "import CoolProp; import CoolProp.CoolProp as CP; CP.enable_TTSE_LUT('Water'); CP.set_TTSE_mode('Water','TTSE'); CP.Props('T','H',500,'P',10000,'Water'); IWater = CP.get_Fluid_index('Water'); from CoolProp.param_constants import iT,iH,iP,iD"
elif mode =='BICUBIC':
setup = "import CoolProp; import CoolProp.CoolProp as CP; CP.enable_TTSE_LUT('Water'); CP.set_TTSE_mode('Water','BICUBIC'); CP.Props('T','H',500,'P',10000,'Water'); IWater = CP.get_Fluid_index('Water'); from CoolProp.param_constants import iT,iH,iP,iD"
else:
raise ValueError()
else:
setup = "import CoolProp.CoolProp as CP; IWater = CP.get_Fluid_index('Water'); CP.disable_TTSE_LUT('Water'); from CoolProp.param_constants import iT,iH,iP,iD"
time = timeit.Timer("CP.IProps(iD,iH,"+str(h)+",iP,"+str(p)+",IWater)",setup).timeit(N)/N*1e6
value = CP.Props('D','H',h,'P',p,'Water')
return time, value
values = dict(subcooled = (500,10000), twophase = (2000,10000), superheated = (3000,10000), supercritical = (2000,30000))
N = 10000
for k in ['subcooled','twophase','superheated','supercritical']:
h, p = values[k]
time_EOS, value_EOS = time_check(N, h, p, TTSE = False)
time_TTSE, value_TTSE = time_check(N, h, p, TTSE = True)
time_BICUBIC, value_BICUBIC = time_check(N, h, p, TTSE = True, mode='BICUBIC')
print("%s %s %s %s %s %s %s" % (k, h, p, (value_TTSE/value_EOS-1.0)*100, (value_BICUBIC/value_EOS-1.0)*100, time_EOS/time_TTSE, time_EOS/time_BICUBIC))
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger(__name__)
def approximate_curve(data, bin_number):
binned = pd.cut(data.capacity_factor, bin_number)
# bins = np.arange(1, len(data.datetime) / bin_number + 1)
# logger.debug("bins: {}".format(bins))
# digitized = np.digitize(data, bins)
# bin_means = [data[digitized == i].mean()
# for i in range(1, len(bin_number))]
return binned
"""There is a vehicle obscuring a pedestrian that conflicts with your path."""
from flow.envs.multiagent import Bayesian0NoGridEnv
from flow.networks import Bayesian1Network
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import SumoCarFollowingParams, VehicleParams
from flow.core.params import PedestrianParams
from flow.controllers import SimCarFollowingController, GridRouter, RLController
from flow.utils.registry import make_create_env
from flow.utils.rllib import FlowParamsEncoder
# Experiment parameters
N_ROLLOUTS = 20 # number of rollouts per training iteration
N_CPUS = 8 # number of parallel workers
# Environment parameters
# TODO(@klin) make sure these parameters match what you've set up in the SUMO version here
V_ENTER = 30 # enter speed for departing vehicles
INNER_LENGTH = 50 # length of inner edges in the traffic light grid network
# number of vehicles originating in the left, right, top, and bottom edges
N_LEFT, N_RIGHT, N_TOP, N_BOTTOM = 0, 1, 1, 1
def make_flow_params():
"""
Generate the flow params for the experiment.
Parameters
----------
Returns
-------
dict
flow_params object
"""
pedestrian_params = PedestrianParams()
pedestrian_params.add(
ped_id='ped_0',
depart_time='0.00',
start='(1.0)--(1.1)',
end='(1.1)--(1.2)',
depart_pos='40')
# we place a sufficient number of vehicles to ensure they confirm with the
# total number specified above. We also use a "right_of_way" speed mode to
# support traffic light compliance
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {}),
car_following_params=SumoCarFollowingParams(
min_gap=2.5,
max_speed=V_ENTER,
decel=7.5, # avoid collisions at emergency stops
speed_mode="right_of_way",
),
routing_controller=(GridRouter, {}),
num_vehicles=2)
vehicles.add(
veh_id='rl',
acceleration_controller=(RLController, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="aggressive",
),
routing_controller=(GridRouter, {}),
num_vehicles=1)
'''
vehicles.add(
veh_id="human_1",
acceleration_controller=(SimCarFollowingController, {}),
car_following_params=SumoCarFollowingParams(
min_gap=2.5,
max_speed=V_ENTER,
decel=7.5, # avoid collisions at emergency stops
speed_mode="right_of_way",
),
routing_controller=(GridRouter, {}),
num_vehicles=1)
'''
n_rows = 1
n_columns = 1
# define initial configs to pass into dict
initial_config = InitialConfig(
spacing='custom',
shuffle=False,
sidewalks=True,
lanes_distribution=float('inf'))
flow_params = dict(
# name of the experiment
exp_tag="bayesian_1_env",
# name of the flow environment the experiment is running on
env_name=Bayesian0NoGridEnv,
# name of the network class the experiment is running on
network=Bayesian1Network,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
restart_instance=False,
sim_step=0.1,
render=False,
),
env=EnvParams(
horizon=500,
# environment related parameters (see flow.core.params.EnvParams)
additional_params={
# maximum acceleration of autonomous vehicles
'max_accel': 2.6,
# maximum deceleration of autonomous vehicles
'max_decel': 4.5,
# desired velocity for all vehicles in the network, in m/s
"target_velocity": 25,
# how many objects in our local radius we want to return
"max_num_objects": 3,
# how large of a radius to search in for a given vehicle in meters
"search_veh_radius": 50,
# how large of a radius to search for pedestrians in for a given vehicle in meters (create effect of only seeing pedestrian only when relevant)
"search_ped_radius": 22,
# whether or not we have a discrete action space,
"discrete": False,
# whether to randomize which edge the vehicles are coming from
"randomize_vehicles": False,
# whether to append the prior into the state
"inference_in_state": False,
# whether to grid the cone "search_veh_radius" in front of us into 6 grid cells
"use_grid": False
},
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
additional_params={
"speed_limit": V_ENTER + 5, # inherited from grid0 benchmark
"grid_array": {
"inner_length": INNER_LENGTH,
"row_num": n_rows,
"col_num": n_columns,
"cars_left": N_LEFT,
"cars_right": N_RIGHT,
"cars_top": N_TOP,
"cars_bot": N_BOTTOM,
},
"horizontal_lanes": 1,
"vertical_lanes": 1,
"randomize_routes": True,
},
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
ped=pedestrian_params,
# parameters specifying the positioning of vehicles upon initialization
# or reset (see flow.core.params.InitialConfig)
initial = initial_config
)
return flow_params
# define callbacks for tensorboard
from datetime import datetime, timedelta
from typing import Optional
from utils.utils import format_date
class Event:
"""Event object to store data about a Google Calendar event"""
def __init__(
self,
event_id: str,
link: str,
title: str,
location: Optional[str],
description: Optional[str],
all_day: bool,
start: datetime,
end: datetime,
):
self.__id = event_id
self.__link = link
self.__title = title
self.__location = location
self.__description = description
self.__all_day = all_day
self.__start = start.replace(tzinfo=None)
self.__end = end.replace(tzinfo=None)
@property
def id(self) -> str:
"""Returns the event id"""
return self.__id
@property
def link(self) -> str:
"""Returns the link to the event in Google Calendar"""
return self.__link
@property
def title(self) -> str:
"""Returns the title of the event"""
return self.__title
@property
def location(self) -> Optional[str]:
"""Returns the location of the event"""
return self.__location
@property
def description(self) -> Optional[str]:
"""Returns the description of the event"""
return self.__description
@property
def all_day(self) -> bool:
"""Returns whether or not the event is an all day event"""
return self.__all_day
@property
def start(self) -> datetime:
"""Returns the start date as a datetime object"""
return self.__start
@property
def end(self) -> datetime:
"""Returns the end date as a datetime object"""
return self.__end
@property
def __one_day(self) -> bool:
"""Returns whether or not the event is a one day event"""
return self.all_day and self.end - self.start <= timedelta(days=1)
    def relative_date_range_str(self, base: Optional[datetime] = None) -> str:
        """Returns a formatted string of the start to end date range"""
        # default resolved at call time to avoid a stale module-import timestamp
        if base is None:
            base = datetime.now()
        start_str = self.__relative_start_str(base=base)
        end_str = self.__relative_end_str(base=self.start)
# all day event
if self.__one_day:
return f"{start_str} - All day"
# include end time if it is not the same as the start time
return f"{start_str} - {end_str}" if end_str else start_str
    def __relative_start_str(self, base: Optional[datetime] = None) -> str:
        """Returns a formatted string of the start date"""
        if base is None:
            base = datetime.now()
        return format_date(self.start, all_day=self.all_day, base=base) or "Today"
    def __relative_end_str(self, base: Optional[datetime] = None) -> str:
        """Returns a formatted string of the end date"""
        if base is None:
            base = datetime.now()
        end_date = self.end
# use previous day if end of multi-day, all-day event
if self.all_day and not self.__one_day:
end_date -= timedelta(days=1)
return format_date(end_date, all_day=self.all_day, base=base)
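# Hedged usage sketch (not part of the original module); assumes
# utils.utils.format_date behaves as the docstrings above describe.
# All field values below are hypothetical.
#   event = Event(
#       event_id="abc123",
#       link="https://calendar.google.com/event?eid=abc123",
#       title="Team sync",
#       location=None,
#       description=None,
#       all_day=False,
#       start=datetime(2024, 1, 1, 10, 0),
#       end=datetime(2024, 1, 1, 11, 0),
#   )
#   event.relative_date_range_str(base=datetime(2024, 1, 1))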
from __future__ import absolute_import, division, print_function
from cctbx.array_family.flex import ( # noqa: F401; lgtm
abs,
acos,
arg,
asin,
atan,
atan2,
bool,
ceil,
compare_derivatives,
complex_double,
condense_as_ranges,
conj,
cos,
cosh,
cost_of_m_handle_in_af_shared,
double,
double_from_byte_str,
double_range,
empty_container_sizes_double,
empty_container_sizes_int,
exercise_versa_packed_u_to_flex,
exp,
extract_double_attributes,
fabs,
first_index,
flex_argument_passing,
float,
float_range,
floor,
fmod,
fmod_positive,
get_random_seed,
grid,
hendrickson_lattman,
histogram,
imag,
int,
int_from_byte_str,
int_range,
integer_offsets_vs_pointers,
intersection,
last_index,
linear_correlation,
linear_interpolation,
linear_regression,
linear_regression_core,
log,
log10,
long,
long_range,
mat3_double,
max,
max_absolute,
max_default,
max_index,
mean,
mean_and_variance,
mean_default,
mean_sq,
mean_sq_weighted,
mean_weighted,
median,
median_functor,
median_statistics,
mersenne_twister,
miller_index,
min,
min_default,
min_index,
min_max_mean_double,
nested_loop,
norm,
order,
permutation_generator,
polar,
pow,
pow2,
product,
py_object,
random_bool,
random_double,
random_double_point_on_sphere,
random_double_r3_rotation_matrix,
random_double_r3_rotation_matrix_arvo_1992,
random_double_unit_quaternion,
random_generator,
random_int_gaussian_distribution,
random_permutation,
random_selection,
random_size_t,
reindexing_array,
rows,
select,
set_random_seed,
show,
show_count_stats,
sin,
sinh,
size_t,
size_t_from_byte_str,
size_t_range,
slice_indices,
smart_selection,
sort_permutation,
sorted,
split_lines,
sqrt,
std_string,
sum,
sum_sq,
sym_mat3_double,
tan,
tanh,
tiny_size_t_2,
to_list,
union,
vec2_double,
vec3_double,
vec3_int,
weighted_histogram,
xray_scatterer,
)
from dials.array_family.flex_ext import ( # noqa: F401; lgtm
real,
reflection_table_selector,
)
from dials_array_family_flex_ext import ( # noqa: F401; lgtm
Binner,
PixelListShoeboxCreator,
int6,
observation,
reflection_table,
reflection_table_to_list_of_reflections,
shoebox,
)
# @Author: BingWu Yang <detailyang>
# @Date: 2016-03-29T17:47:44+08:00
# @Email: [email protected]
# @Last modified by: detailyang
# @Last modified time: 2016-04-10T16:54:56+08:00
# @License: The MIT License (MIT)
import ply.yacc as yacc
import eslast as ast
from esllexer import ESLLexer
tokens = ESLLexer.tokens
def p_request(p):
'''request : URL
| URL METHOD
| URL METHOD OPTIONS'''
if len(p) == 2:
p[0] = ast.RequestNode(ast.MethodNode('GET'), ast.URLNode(p[1]), None)
elif len(p) == 3:
p[0] = ast.RequestNode(ast.MethodNode(p[2]), ast.URLNode(p[1]), None)
else:
p[0] = ast.RequestNode(ast.MethodNode(p[2]), ast.URLNode(p[1]), p[3])
def p_options(p):
'''OPTIONS :
| OPTION
| OPTIONS OPTION'''
if len(p) == 2:
p[0] = ast.OptionListNode([p[1]])
else:
p[0] = p[1].append(p[2])
def p_option_empty(p):
' OPTION : empty '
p[0] = p[1]
def p_option_header(p):
' OPTION : HEADERVALUE '
p[0] = p[1]
def p_option_querystring(p):
' OPTION : QUERYSTRINGVALUE '
p[0] = p[1]
def p_option_body(p):
' OPTION : BODYVALUE '
p[0] = p[1]
def p_empty(p):
'empty :'
p[0] = []
def p_querystring_value(p):
'''QUERYSTRINGVALUE : QUERYSTRING VALUE '''
p[0] = ast.OptionNode(ast.QueryStringNode(p[1]), ast.ValueNode(p[2]))
def p_querystring_shell(p):
'''QUERYSTRINGVALUE : QUERYSTRING SHELL '''
p[0] = ast.OptionNode(ast.QueryStringNode(p[1]), ast.ShellNode(p[2]))
def p_header_value(p):
'''HEADERVALUE : HEADER VALUE '''
p[0] = ast.OptionNode(ast.HeaderNode(p[1]), ast.ValueNode(p[2]))
def p_header_shell(p):
'''HEADERVALUE : HEADER SHELL '''
p[0] = ast.OptionNode(ast.HeaderNode(p[1]), ast.ShellNode(p[2]))
def p_body_value(p):
'''BODYVALUE : BODY VALUE '''
p[0] = ast.OptionNode(ast.BodyNode(p[1]), ast.ValueNode(p[2]))
def p_body_shell(p):
'''BODYVALUE : BODY SHELL '''
p[0] = ast.OptionNode(ast.BodyNode(p[1]), ast.ShellNode(p[2]))
def p_error(p):
print("Syntax Error")
print("ESL format: {URL} {METHOD} {OPTIONS}")
print("{URL}: https://example.com|examples.com|/api/endpoints")
print("{METHOD}: GET|get|POST|post|DELETE|delete|PUT|put")
print("{OPTIONS}: --hContent-Type=application/json")
print("{OPTIONS}: --qper_page=1")
print("{OPTIONS}: --busername=xxxx")
def parse(text):
parser = yacc.yacc(debug=True)
ast = parser.parse(text, ESLLexer().build())
return ast
if __name__ == '__main__':
ast = parse("/api/cmdb/peoples/ get --qhost_ip=!(ifconfig eth0) --qhost_name=bj-sdf --hContent-Type=abcd --bslkjsdf=123") # Test it
print(ast.left)
print(ast.method)
for option in ast.right.options:
key = option.key
value = option.value
from discord.ext import commands
import config
class Bot(commands.Bot):
async def invoke(self, ctx):
if self.user.mentioned_in(ctx.message):
# Mention was processed in on_message.
return
if ctx.invoked_with:
await ctx.send(config.response)
async def on_message(self, message):
# bot?
if message.author.bot:
return
# mention?
if self.user.mentioned_in(message):
await message.channel.send(config.response)
return
# invoke command
await self.process_commands(message)
#!/usr/bin/env python3
import altair as alt
import pandas
import selenium
def vegaGraphics(
cmdTag,
id1,
id2,
parameters,
sql,
transformedData,
verbose,):
"""Create interactive charts for specified data"""
# making function more explicit
cmdTag = cmdTag
id1 = id1
id2 = id2
parameters = parameters
sql = sql
transformedData = transformedData
verbose = verbose
if verbose >= 1:
print(
"Creating Vega Graphics"
)
transformedData = transformedData.rename(
columns = {
id1 : "id1",
id2 : "id2",
sql : "sql",
cmdTag : "cmdTag",
parameters : "parameters"})
dataInfo = transformedData.copy()
data = transformedData[["total_duration",
"cmdTag",
"id1",
"id2",
"sql",
"parameters"]].copy()
data = data.sort_values(by = ["total_duration"],
ascending = True,
inplace = False).dropna().reset_index(drop = True)
data["length"] = data["sql"].str.len() + data["parameters"].str.len()
alt.data_transformers.disable_max_rows()
brush = alt.selection_interval()
# -----> create the scatter plot graph
line = alt.Chart(data.reset_index()).mark_point().encode(
x = alt.X(
"length:Q",
axis = alt.Axis(title = "Query Length")),
y=alt.Y(
"total_duration:Q",
axis = alt.Axis(title = "Latency (ms)")),
color = alt.condition(
brush,
"cmdTag:N",
alt.value("lightgray")),
shape = "cmdTag:N",
tooltip = ["index:O",
"total_duration:Q",
"length:Q",
"log_time_with_tz:N",
"sql:N",
"parameters:N",
"cmdTag:N",
"id1:N",
"id2:N"]
).properties(
width = 500,
height = 500,
title = "Einherjar Queries"
).add_selection(
brush
).interactive()
# -----> display the mean via a line across our chart
rule = alt.Chart(data).mark_rule(color = "red").encode(
y = "median(total_duration):Q",
size = alt.value(2)
)
alt.Chart(data).configure_title(
fontSize = 30
)
    # -----> display number of iterations per table insert
dog = dataInfo[["inserted_data", "cmdTag"]].dropna()
bars1 = alt.Chart(dog).mark_bar().encode(
y = "inserted_into:N",
color = "cmdTag:N",
x = "count(inserted_into):Q"
).transform_filter(
brush
)
    # -----> display number of iterations per table select
cat = dataInfo[["selected_from", "cmdTag"]].dropna()
bars2 = alt.Chart(cat).mark_bar().encode(
y = "selected_from:N",
color = "cmdTag:N",
x = "count(selected_from):Q"
).transform_filter(
brush
)
# -----> add the line and rule charts to the base chart
chart = line + rule
chart = chart & bars1 & bars2
chart.save("results/data.json")
chart.save("results/data.html")
if verbose >= 1:
print(
"Vega Graphics have been completed"
        )
from ark.thread_handler import ThreadHandler
from factory import Factory
import time
class GuiTasks(object):
@classmethod
def loop(cls):
time.sleep(1)
GuiTasks.get_active_threads()
@classmethod
def get_active_threads(cls):
GUI = Factory.get('GUI')
max_threads = len(ThreadHandler.activethreads)
active_threads = 0
for key,timestamp in ThreadHandler.activethreads.items():
if timestamp > (time.time()-30):
active_threads += 1
GUI.active_threads['text'] = "{} / {}".format(active_threads,max_threads)
import inject
from flask import Flask, Response, send_from_directory, send_file
class StaticRoute:
@staticmethod
@inject.autoparams()
def init(flask: Flask) -> None:
@flask.route("/static/<path:path>")
def send_static(path: str) -> Response:
return send_from_directory(f"static", path)
@flask.route("/")
def index() -> Response:
return send_file("static/html/index.html")
# Exercise 31: Making Decisions
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
print "There's a giant bear here eating a cheese cake. What do you do?"
print "1. Take the cake."
print "2. Scream at the bear."
print "3. Turn back quietly"
print "4. Look around"
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear eats your legs off. Good job!"
elif bear == "3":
print "One plank creaked and bear eats you. Good job!"
elif bear == "4":
print "There have rifle. Will you get it?"
print "1. Yes!"
print "2. No!"
rifle = raw_input("> ")
if rifle == "1":
print "Did you want to shoot the bear?"
print "1. Yes, of course!"
print "2. No!"
choice = raw_input("> ")
if choice == "1":
print """
The rifle isn't loaded!
You look around and see bullets on the table.
You are going to get them,
but the bear see you and eat you!!!
Good job! :D
"""
elif choice == "2":
print "While you thinking what to do the bear see you and eat you! Good job!"
else:
print "You can't choice other, for that you die! Good Job!"
elif rifle == "2":
print """
This is stupid decision
and what will do now?
Okey, just die. Good job!
"""
else:
print "You can't choce other, for that you die! Good Job!"
else:
print "Well, doing %s is probably better. Bear runs away." % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina."
print "1. Blueberries."
print "2. Yellow jacket clothespins."
print "3. Understanding revolvers yelling melodies."
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
else:
print "You stumble around and fall on a knife and die. Good job!"
# Study Drills:
# 1. Make new parts of the game and change what decisions people
# can make. Expand the game out as much as you can before it gets
# ridiculous.
# 2. Write a completely new game. Maybe you don't like this one, so
# make your own. This is your computer, do what you want.
# Generated by Django 3.1.1 on 2020-10-08 02:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rameniaapp', '0009_auto_20201002_0243'),
]
operations = [
migrations.CreateModel(
name='Edit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, upload_to='')),
('change', models.JSONField(blank=True, null=True)),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('noodle', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='rameniaapp.noodle')),
],
),
]
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
tableau20 = [
'steelblue', # 0
'lightsteelblue', # 1
'darkorange', # 2
'peachpuff', # 3
'green', # 4
'lightgreen', # 5
'crimson', # 6
'lightcoral', # 7
'mediumpurple', # 8
'thistle', # 9
'saddlebrown', # 10
'rosybrown', # 11
'orchid', # 12
'lightpink', # 13
'gray', # 14
'lightgray', # 15
'olive', # 16
'palegoldenrod', # 17
'mediumturquoise', # 18
'paleturquoise', # 19
]
tableau10 = [
'blue', # 'steelblue', # 0
'darkorange', # 1
'green', # 2
'crimson', # 3
'mediumpurple', # 4
'saddlebrown', # 5
'orchid', # 6
'gray', # 7
'olive', # 8
'mediumturquoise', # 9
]
tableau10_light = [
'lightsteelblue', # 0
'peachpuff', # 1
'lightgreen', # 2
'lightcoral', # 3
'thistle', # 4
'rosybrown', # 5
'lightpink', # 6
'lightgray', # 7
'palegoldenrod', # 8
'paleturquoise', # 9
]
tab10_index = [3, 0, 2, 1, 2, 4, 5, 6, 7, 8, 9]
class PlotScheme(object):
def __init__(self):
        # to have a tight packing on the chart, whether only the x axis or
        # also the y axis is packed tightly (see matplotlib)
self.ytight = False
# y-margin (top/bottom) for the subcharts. This will not overrule the
# option plotinfo.plotymargin
self.yadjust = 0.0
        # Each new line is in z-order below the previous one. Change it to
        # False to have lines painted above the previous line
self.zdown = True
        # Rotation of the date labels on the x axis
self.tickrotation = 15
# How many "subparts" takes a major chart (datas) in the overall chart
# This is proportional to the total number of subcharts
self.rowsmajor = 5
# How many "subparts" takes a minor chart (indicators/observers) in the
# overall chart. This is proportional to the total number of subcharts
# Together with rowsmajor, this defines a proportion ratio betwen data
# charts and indicators/observers charts
self.rowsminor = 1
# Distance in between subcharts
self.plotdist = 0.0
# Have a grid in the background of all charts
self.grid = True
        # Default plotstyle for the OHLC bars (line -> line on close)
# Other options: 'bar' and 'candle'
self.style = 'line'
# Default color for the 'line on close' plot
self.loc = 'black'
# Default color for a bullish bar/candle (0.75 -> intensity of gray)
self.barup = '0.75'
# Default color for a bearish bar/candle
self.bardown = 'red'
        # Level of transparency to apply to bars/candles (NOT USED)
self.bartrans = 1.0
        # Whether the candlesticks have to be filled or be transparent
self.barupfill = True
self.bardownfill = True
        # Degree of transparency (alpha) used when filling the candlesticks
self.fillalpha = 0.20
        # Whether to plot volume or not. Note: if the data in question has no
# volume values, volume plotting will be skipped even if this is True
self.volume = True
        # Whether to overlay the volume on the data or use a separate subchart
self.voloverlay = True
# Scaling of the volume to the data when plotting as overlay
self.volscaling = 0.33
        # Pushing overlay volume up for better visibility. Experimentation
# needed if the volume and data overlap too much
self.volpushup = 0.00
# Default colour for the volume of a bullish day
self.volup = '#aaaaaa' # 0.66 of gray
# Default colour for the volume of a bearish day
self.voldown = '#cc6073' # (204, 96, 115)
# Transparency to apply to the volume when overlaying
self.voltrans = 0.50
# Transparency for text labels (NOT USED CURRENTLY)
self.subtxttrans = 0.66
# Default font text size for labels on the chart
self.subtxtsize = 9
# Transparency for the legend (NOT USED CURRENTLY)
self.legendtrans = 0.25
        # Whether indicators have a legend displayed in their charts
self.legendind = True
# Location of the legend for indicators (see matplotlib)
self.legendindloc = 'upper left'
# Location of the legend for datafeeds (see matplotlib)
self.legenddataloc = 'upper left'
# Plot the last value of a line after the Object name
self.linevalues = True
# Plot a tag at the end of each line with the last value
self.valuetags = True
# Default color for horizontal lines (see plotinfo.plothlines)
self.hlinescolor = '0.66' # shade of gray
# Default style for horizontal lines
self.hlinesstyle = '--'
# Default width for horizontal lines
self.hlineswidth = 1.0
# Default color scheme: Tableau 10
self.lcolors = tableau10
# strftime Format string for the display of ticks on the x axis
self.fmt_x_ticks = None
# strftime Format string for the display of data points values
self.fmt_x_data = None
def color(self, idx):
colidx = tab10_index[idx % len(tab10_index)]
return self.lcolors[colidx]
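# Illustrative usage (not part of the original module): successive calls cycle
# through tab10_index, e.g. PlotScheme().color(0) -> 'crimson' (tab10_index[0] == 3)
# and PlotScheme().color(1) -> 'blue' (tab10_index[1] == 0).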
| nilq/baby-python | python |
#!/usr/bin/env python
#Creates an instance in /home/pi/.config/lxsession/LXDE-pi/autostart which will autolaunch the server on the pi user account.
import time
print "Copy the path of the shortcut file by right clicking it and clicking 'copy path(s)'."
print "Paste the path when prompted by right clicking in the terminal and clicking 'paste'."
dspath = raw_input("Paste the full path to the server shortcut: ")
atspath = "/home/pi/.config/lxsession/LXDE-pi/autostart"
desktopentry = open(dspath, "r")
desktopcnt = desktopentry.readlines()
desktopentry.close()
workingline = "failsafe"
for line in desktopcnt:
if line[0:4] == "Exec":
workingline = line
if workingline == "failsafe":
print "no Exec line was found in the file you specified."
print "The program will terminate"
time.sleep(5)
exit()
workingline = workingline.strip()
workingline = workingline[6:len(workingline)]
autostartline = "@"+workingline+"\n"
readcurrent = open(atspath, "r")
readcnt = readcurrent.readlines()
readcurrent.close()
memory = []
for line in readcnt:
if len(line) > 2:
memory.append(line)
memory.insert(0, autostartline)
print memory
overwritecurrent = open(atspath, "w")
lenmem = len(memory)
for x in range(lenmem):
overwritecurrent.write("%s" %(memory[x]))
overwritecurrent.close()
print "Autostart entry created."
print "Program will terminate"
time.sleep(5)
exit()
| nilq/baby-python | python |
class Book():
'''
Creates a book object that can be used to populate a web page
Inputs:
- title: the title of the book [str]
- author: the author of the book [str]
- series: the series the book belongs to or None [str]
- review_text: a short blurb about the book [str]
- image_url: a place to find the cover image of the book [str]
'''
def __init__(self, title, author, series, review_text, image_url):
self.title = title
self.author = author
self.series = series
self.review_text = review_text
self.image_url = image_url
def create_book_info(self):
if self.series == None:
self.series = 'This is a stand alone book.'
else:
self.series = 'This book is part of the series {}'.format(self.series)
return {
'title': self.title,
'author': self.author,
'series': self.series,
'review_text': self.review_text,
'image_url': self.image_url
}
class Movie():
'''
    Creates a movie object that can be used to populate a web page
    Inputs:
        - title: the title of the movie [str]
        - image_url: a place to find the poster image of the movie [str]
        - trailer_url: a link to the movie trailer (e.g. on YouTube) [str]
'''
def __init__(self, title, image_url, trailer_url):
self.title = title
self.poster_image_url = image_url
self.trailer_youtube_url = trailer_url
def create_movie_info(self):
return {
'title': self.title,
            'image_url': self.poster_image_url,
            'trailer_url': self.trailer_youtube_url
}
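# Illustrative usage (names and URLs are made up):
#   toy_story = Movie("Toy Story",
#                     "https://example.com/toy_story_poster.jpg",
#                     "https://www.youtube.com/watch?v=example")
#   entry = toy_story.create_movie_info()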
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from .base import Smoother
__all__ = ['Smoother']
| nilq/baby-python | python |
from os.path import join, dirname
from textx import metamodel_for_language
def test_example():
mm = metamodel_for_language('questionnaire')
questionnaire = mm.model_from_file(join(dirname(__file__), 'example.que'))
assert len(questionnaire.questions) == 6
assert questionnaire.questions[3].text == 'Author name'
assert questionnaire.questions[2].type.__class__.__name__ == 'Free'
assert questionnaire.questions[0].type.__class__.__name__ == 'Choice'
assert questionnaire.questions[5].text == \
'This question is to test multiline feature and indenting.'
opt = questionnaire.questions[5].type.options
assert len(opt) == 2
assert opt[0].num == 1
assert opt[0].text == 'Working'
# Multiline
assert opt[1].text == \
'Not working. This is also to test multiline in choices.'
| nilq/baby-python | python |
# -*- coding: UTF-8 -*-
import threading
import json
import re
from datetime import datetime
from flask import current_app
from flask_jwt import current_identity, jwt_required
from flask_restful import Resource, request
from marshmallow import EXCLUDE, ValidationError
from sqlalchemy.exc import SQLAlchemyError
from common.utils import paginate_parse, pretty_response
from common.tasks import analysis_dataset, analysis_dataset_block, fetch_collection, delete_collection
from models.dataset import DatasetModel, DatasetSchema
from models.blockset import BlocksetModel, BlocksetSchema
class DatasetList(Resource):
@jwt_required()
def get(self):
""" Query all instances """
if current_identity.roles not in ['super']:
return pretty_response(403)
title = request.args.get('title', '')
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 10, type=int)
paginate = DatasetModel.query.filter(DatasetModel.title.like('%' + title + '%')).paginate(
page, per_page, max_per_page=100)
data = paginate_parse(paginate)
data['items'] = DatasetSchema(many=True).dump(paginate.items)
return pretty_response(200, data)
@jwt_required()
def post(self):
""" Insert multi-instances """
if current_identity.roles not in ['super']:
return pretty_response(403)
jsondata = request.get_json()
if DatasetModel.query.filter_by(title=jsondata['title']).first():
return pretty_response(40002)
headers = jsondata.get('header', [])
catalog = jsondata.get('catalog', None)
if '数值' not in headers \
or (catalog == 'block' and '板块' not in headers) \
or (catalog == 'point' and '标题' not in headers):
return pretty_response(40001)
try:
dataset_instance = DatasetSchema().load(jsondata, unknown=EXCLUDE)
dataset_instance.add(dataset_instance)
blocksets = BlocksetModel.query.all()
data = json.loads(
re.sub(r'[\s+]', '', json.dumps(jsondata.get('data', []))))
if catalog == 'block':
                # Import block ('板块') data
t = threading.Thread(target=analysis_dataset_block, args=(
'T' + dataset_instance.uuid, data, blocksets, headers))
t.start()
else:
                # Import collection data
t = threading.Thread(target=analysis_dataset, args=(
'T' + dataset_instance.uuid, data, blocksets, headers))
t.start()
dataset_dump = DatasetSchema().dump(dataset_instance)
return pretty_response(200, dataset_dump)
except ValidationError as e:
current_app.logger.error(e.messages)
return pretty_response(40003)
except SQLAlchemyError as e:
current_app.logger.error(e)
return pretty_response(50001)
def put(self):
""" Update multi-instances """
return pretty_response(405)
def delete(self):
""" Batch-delete instances """
return pretty_response(405)
class Dataset(Resource):
@jwt_required()
def get(self, uuid):
""" Query specific instance """
if current_identity.roles not in ['super']:
return pretty_response(403)
dataset_instance = DatasetModel.query.get_or_404(uuid)
dataset_dump = DatasetSchema().dump(dataset_instance)
result = fetch_collection('T' + dataset_instance.uuid, [])
dataset_dump['data'] = result
return pretty_response(200, dataset_dump)
@jwt_required()
def post(self, uuid):
""" Update specific instance """
if current_identity.roles not in ['super']:
return pretty_response(403)
dataset_instance = DatasetModel.query.get_or_404(uuid)
jsondata = request.get_json()
if not jsondata:
return pretty_response(40001)
catalog = jsondata.get('catalog', '')
match = jsondata.get('match', {})
pipeline = []
aggregate_items = []
aggregate_max = 0
if catalog == 'block':
pipeline = [{
'$match': match
}, {
'$group': {'_id': "$板块", 'value': {dataset_instance.mode: '$数值'}}
}]
result = fetch_collection('T' + dataset_instance.uuid, pipeline)
blockset_list = BlocksetModel.query.all()
for blockset in blockset_list:
temp = {
'title': blockset.title,
'area': blockset.area,
'centroid': json.loads(blockset.centroid),
'coordinates': json.loads(blockset.coordinates),
'org_value': 0,
'value': 0,
}
for item in result:
if blockset.title == item.get('_id', ''):
if not item.get('value', None):
break
item_value = item.get('value', 0)
temp['org_value'] = item_value
temp['value'] = round(
item_value / float(blockset.area), 4) if dataset_instance.inc_area else item_value
if temp['value'] > aggregate_max:
aggregate_max = temp['value']
break
aggregate_items.append(temp)
else:
pipeline = [{
'$match': match
}]
result = fetch_collection('T' + dataset_instance.uuid, pipeline)
for item in result:
aggregate_item = {
'title': item.get('标题', ''),
'address': item.get('地址', ''),
'lng': item.get('经度', ''),
'lat': item.get('纬度', ''),
'value': item.get('数值', 0),
}
if aggregate_item['value'] > aggregate_max:
aggregate_max = aggregate_item['value']
aggregate_items.append(aggregate_item)
return pretty_response(200, {'max': aggregate_max, 'items': aggregate_items})
@jwt_required()
def put(self, uuid):
""" Update specific instance """
if current_identity.roles not in ['super']:
return pretty_response(403)
dataset_instance = DatasetModel.query.get_or_404(uuid)
try:
jsondata = request.get_json()
DatasetSchema().load(jsondata, unknown=EXCLUDE)
for key, val in jsondata.items():
setattr(dataset_instance, key, val)
dataset_instance.updatetime = datetime.now()
dataset_instance.update()
dataset_dump = DatasetSchema().dump(dataset_instance)
return pretty_response(200, dataset_dump)
except ValidationError as e:
current_app.logger.error(e.messages)
return pretty_response(40003)
except SQLAlchemyError as e:
current_app.logger.error(e)
return pretty_response(50001)
@jwt_required()
def delete(self, uuid):
""" Delete specific instance """
if current_identity.roles not in ['super']:
return pretty_response(403)
dataset_instance = DatasetModel.query.get_or_404(uuid)
delete_collection('T' + dataset_instance.uuid)
try:
dataset_instance.delete(dataset_instance)
return pretty_response(20003)
except SQLAlchemyError as e:
current_app.logger.error(e)
pretty_response(50001)
class DatasetFree(Resource):
def put(self, uuid):
""" Update specific instance """
dataset_instance = DatasetModel.query.get_or_404(uuid)
try:
jsondata = request.get_json()
DatasetSchema().load(jsondata, unknown=EXCLUDE)
for key, val in jsondata.items():
setattr(dataset_instance, key, val)
dataset_instance.updatetime = datetime.now()
dataset_instance.update()
dataset_dump = DatasetSchema().dump(dataset_instance)
return pretty_response(200, dataset_dump)
except ValidationError as e:
current_app.logger.error(e.messages)
return pretty_response(40003)
except SQLAlchemyError as e:
current_app.logger.error(e)
return pretty_response(50001)
| nilq/baby-python | python |
"""
Discovering structure in heatmap data
=====================================
_thumb: .4, .2
"""
import pandas as pd
import seaborn as sns
sns.set(font="monospace")
# Load the brain networks example dataset
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Select a subset of the networks
used_networks = [1, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Create a custom palette to identify the networks
network_pal = sns.cubehelix_palette(len(used_networks),
light=.9, dark=.1, reverse=True,
start=1, rot=-2)
network_lut = dict(zip(map(str, used_networks), network_pal))
# Convert the palette to vectors that will be drawn on the side of the matrix
networks = df.columns.get_level_values("network")
network_colors = pd.Series(networks, index=df.columns).map(network_lut)
# Create a custom colormap for the heatmap values
cmap = sns.diverging_palette(h_neg=210, h_pos=350, s=90, l=30, as_cmap=True)
# Draw the full plot
sns.clustermap(df.corr(), row_colors=network_colors, linewidths=.5,
col_colors=network_colors, figsize=(13, 13), cmap=cmap)
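# When running this gallery example as a standalone script, the figure can be
# shown with matplotlib (assumption; not part of the original example):
# import matplotlib.pyplot as plt
# plt.show()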
| nilq/baby-python | python |
from django.db import models
class Position(models.Model):
w = models.CharField(max_length=128, null=True, blank=True)
x = models.CharField(max_length=128, null=True, blank=True)
y = models.CharField(max_length=128, null=True, blank=True)
z = models.CharField(max_length=128, null=True, blank=True)
time_received = models.DateField()
| nilq/baby-python | python |
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
A library of `invoke
<http://docs.pyinvoke.org/en/latest/index.html>`__ tasks. See
:doc:`/invlib`.
.. autosummary::
:toctree:
tasks
utils
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import six
from importlib import import_module
from invoke import Collection
from unipath import Path
import atelier
def setup_from_tasks(
globals_dict, main_package=None,
settings_module_name=None, **kwargs):
"""
This is the function you must call from your :xfile:`tasks.py` file
in order to activate the tasks defined by atelier.
"""
if '__file__' not in globals_dict:
raise Exception(
"No '__file__' in %r. "
"First parameter to must be `globals()`" % globals_dict)
tasks_file = Path(globals_dict['__file__'])
if not tasks_file.exists():
raise Exception("No such file: %s" % tasks_file)
# print("20180428 setup_from_tasks() : {}".format(root_dir))
from atelier.invlib import tasks
from atelier.projects import get_project_from_tasks
prj = get_project_from_tasks(tasks_file.parent)
atelier.current_project = prj
if kwargs:
prj.config.update(kwargs)
if settings_module_name is not None:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module_name
from django.conf import settings
prj.config.update(
languages=[lng.name for lng in settings.SITE.languages])
if isinstance(main_package, six.string_types):
main_package = import_module(main_package)
if main_package:
prj.set_main_package(main_package)
self = Collection.from_module(tasks)
prj.set_namespace(self)
return self
| nilq/baby-python | python |
from flask import request, jsonify, current_app, make_response, session
import random
from info.libs.yuntongxun import sms
from . import passport_blue
from info.utils.response_code import RET
from info.utils.captcha.captcha import captcha
from info import redis_store,constants,db
# Import the model class
from info.models import User
import re
from datetime import datetime
"""
json.loads:把json字符串转成字典
json.dumps: 把字典转成json字符串
json.load/json.dump(操作的是文件对象)
var data={
"mobile":mobile,
"image_code":imagecode,
...
}
a='123';
json的概念:本质字符串,基于键值对的字符串;轻量级的数据交互格式;
json的作用:实现跨语言,跨平台的数据交互;
xml 格式: 作用是用来传输数据;都是闭合标签
XML: xmltodic模块,xmltodict.parse()/unparse() 微信,
html用来展示数据;
<xml>
<mobile>12223234</mobile>
<image_code>12223234</image_code>
</xml>
JSON
{
"mobile":mobile,
"image_code":imagecode,
}
JSON.Stringify(data) 前端把对象转成json字符串;
"""
@passport_blue.route('/image_code')
def generate_image_code():
"""
1.获取前端生成的uuid,/image_code?image_code_id=uuid
request.args.get('image_code_id')
2. 判断参数是否存在,如果不存在uuid,直接return
3.调用工具生成图片验证码,
4.存储redis图片验证码的text文本,构造redis数据实例,用来存储业务相关的数据比如 :图片验证码
5. 返回图片给浏览器,
状态码:
return jsonify(errno=666,errmsg='uuid未获取到')
1. 自定义的状态码: 用来实现前后端的数据交互.
$.ajax({
url:'/image_code,
type:'get'
data:data,
contentType:'application/json'
success:function(resp){
if (resp == 666){
alert(成功)
}else{
alert(失败)
}
}
})
:return:
"""
    # Get the parameter
    image_code_id=request.args.get('image_code_id')
    # Validate the parameter: if the UUID is missing, return an error message
    if not image_code_id:
        return jsonify(errno=RET.PARAMERR,errmsg='参数缺失')
    # Use the captcha tool to generate an image verification code
    name,text,image=captcha.generate_captcha()
    # Save the text of the image verification code
try:
redis_store.setex('ImageCode_'+image_code_id,constants.IMAGE_CODE_REDIS_EXPIRES,text)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='保存图片验证码失败')
else:
response=make_response(image)
        # The default response Content-Type is text/html; change it so the image is returned correctly
response.headers['Content-Type']='image/jpg'
return response
@passport_blue.route('/sms_code',methods=['POST'])
def send_sms_code():
"""
发送短信验证码
获取参数---校验参数---业务处理(查询数据)---返回结果
1、获取post请求的三个参数;前端使用ajax传入的参数,前端如何传入json?
mobile/image_code/image_code_id
request.json.get()
2、检查参数的完整性
3、检查手机号的格式是否符合要求,使用正则
4、比较图片验证码,从redis数据库中获取真实的图片验证码
get()
5、判断图片验证码是否过期
6、需要先删除Redis中真实存在的图片验证码,因为图片验证码只能获取一次,比较一次.
7、比较图片验证码,如果图片验证码正确
**检查手机号是否注册过???
8、生成短信的随机数,六位数的随机数 random
9、保存短信随机数到Redis数据库中,
10、调用云通讯接口,发送短信,保存发送结果
11、返回发送结果
:return:
"""
mobile=request.json.get('mobile')
image_code=request.json.get('image_code')
image_code_id=request.json.get('image_code_id')
    # Check that all parameters are present
    if not all([mobile,image_code,image_code_id]):
        return jsonify(errno=RET.PARAMERR,errmsg='参数不完整')
    # Check the mobile number format, e.g. 13012345678
    if not re.match(r'1[3456789]\d{9}$',mobile):
        return jsonify(errno=RET.PARAMERR,errmsg='手机号格式错误')
    # Try to fetch the real image verification code from the redis database
try:
real_image_code=redis_store.get('ImageCode_'+image_code_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='获取图片验证码数据失败')
    # Check whether the image verification code has expired
    if not real_image_code:
        return jsonify(errno=RET.NODATA,errmsg='图片验证码已过期')
    # Delete the image verification code from the Redis database
    try:
        redis_store.delete('ImageCode_'+image_code_id)
    except Exception as e:
        current_app.logger.error(e)
    # Compare the image verification codes, ignoring case
    if real_image_code.lower() != image_code.lower():
        return jsonify(errno=RET.DATAERR,errmsg='图片验证码错误')
    # Check whether the user is already registered
try:
# User.query.filter_by(mobile=mobile).first()
user=User.query.filter(User.mobile==mobile).first()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='查询用户数据失败')
else:
        # Check whether the query returned an existing user
if user is not None:
return jsonify(errno=RET.DATAEXIST,errmsg='用户已存在')
    # Generate a six-digit SMS verification code using the random module
sms_code='%06d' % random.randint (0, 999999)
print(sms_code)
try:
redis_store.setex('SMSCode_'+mobile,constants.SMS_CODE_REDIS_EXPIRES,sms_code)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='保存短信数据失败')
    # Use the yuntongxun (cloud messaging) extension to send the SMS
try:
ccp=sms.CCP()
result=ccp.send_template_sms(mobile,[sms_code,constants.SMS_CODE_REDIS_EXPIRES/60],1)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.THIRDERR,errmsg='发送短信异常')
    # Check whether the SMS was sent successfully
if result==0:
return jsonify(errno=RET.OK,errmsg='发送成功')
else:
return jsonify(errno=RET.THIRDERR,errmsg='发送失败')
@passport_blue.route('/register',methods=['POST'])
def register():
"""
用户注册
1、获取参数,mobile,sms_code,password
2、检查参数的完整性
3、检查手机号的格式
4、检查短信验证码,尝试从Redis数据库中获取真实的短信验证码
5、判断获取结果是否过期
6、先比较短信验证码是否一致
7、删除Redis数据库中的短信验证码
8、构造模型类对象
user=User()
user.password=password
9、提交数据到数据库中,mysql
10、把用户基本信息缓存到Redis数据库中
session['user_id']=user.id
session['mobile']=mobile
session['nick_name']=mobile
11、返回结果
:return:
"""
mobile=request.json.get('mobile')
sms_code=request.json.get('sms_code')
password=request.json.get('password')
    # Check that all parameters are present
    if not all([mobile,sms_code,password]):
        return jsonify(errno=RET.PARAMERR,errmsg='参数缺失')
    # Check the mobile number format
    if not re.match(r'1[3456789]\d{9}$',mobile):
        return jsonify(errno=RET.PARAMERR,errmsg='手机号格式错误')
    # Try to fetch the real SMS verification code from Redis
try:
real_sms_code=redis_store.get('SMSCode_'+mobile)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='查询短信验证码失败')
    # Check the query result
if not real_sms_code:
return jsonify(errno=RET.NODATA,errmsg='短信验证码已过期')
    # Compare the SMS verification codes
if real_sms_code !=str(sms_code):
return jsonify(errno=RET.DATAERR,errmsg='短信验证码不一致')
    # Delete the SMS verification code stored in the redis database
try:
redis_store.delete('SMSCode_'+mobile)
except Exception as e:
current_app.logger.error(e)
    # Build the model instance
    user=User()
    user.mobile=mobile
    user.nick_name=mobile
    # The password setter calls generate_password_hash in the model class, so the password is stored encrypted (sha256)
    user.password=password
    # Commit the user registration data to the mysql database
try:
db.session.add(user)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
        # If saving the data raises an exception, the session must be rolled back
db.session.rollback()
return jsonify(errno=RET.DBERR,errmsg='保存用户数据失败')
    # Cache the user info in the Redis-backed session
    session['user_id']=user.id
    session['mobile']=mobile
    session['nick_name']=mobile
    # Return the result
return jsonify(errno=RET.OK,errmsg='注册成功')
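# Illustrative request body for POST /register (values are made up):
#   {"mobile": "13012345678", "sms_code": "123456", "password": "secret"}
# The handler reads these three fields via request.json.get() as shown above.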
@passport_blue.route("/login",methods=['POST'])
def login():
"""
用户登录
1、获取参数:mobile,password
2、检查参数完整性
3、检查手机号的格式
4、根据手机号查询数据库,确认用户user存在
5、调用模型类检查密码是否正确的方法
6、记录用户的登录时间
user.last_login=datetime.now()
7、提交数据库,如果发生异常需要回滚
8、缓存用户信息session,昵称要换成user.nick_name
8、返回结果
:return:
"""
    # Get the parameters
    mobile=request.json.get('mobile')
    password=request.json.get('password')
    # Check that all parameters are present
    if not all([mobile,password]):
        return jsonify(errno=RET.PARAMERR,errmsg='参数缺失')
    # Check the mobile number format
    if not re.match(r'1[3456789]\d{9}$',mobile):
        return jsonify(errno=RET.PARAMERR,errmsg='手机号格式错误')
    # Query the database by mobile number to confirm the user is registered
try:
user=User.query.filter_by(mobile=mobile).first()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='查询用户数据失败')
    # Check whether the user is registered and the password is correct
    if user is None or not user.check_password(password):
        return jsonify(errno=RET.DATAERR,errmsg='用户名或密码错误')
    # Record the user's login time
    user.last_login=datetime.now()
    # Commit the data to the database
try:
db.session.add(user)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR,errmsg='保存数据失败')
    # Cache the user info in the redis-backed session
    session['user_id']=user.id
    session['mobile']=mobile
    # Unlike at registration, cache user.nick_name here: a user can log in many times and may have changed the nickname
session['nick_name']=user.nick_name
# 返回结果
return jsonify(errno=RET.OK,errmsg='ok')
@passport_blue.route("/logout")
def logout():
"""
如果是前后端分离,以及符合RESTful风格,(表现层状态转换),退出的请求方法为delete
get/post/put/delete 获取/新建/修改/删除
退出登录
1、本质是清除服务器缓存的用户信息
:return:
"""
session.pop('user_id',None)
session.pop('mobile',None)
session.pop('nick_name',None)
return jsonify(errno=RET.OK,errmsg='OK')
pass
| nilq/baby-python | python |
from mpf.tests.MpfGameTestCase import MpfGameTestCase
from mpf.core.rgb_color import RGBColor
class TestBlinkenlight(MpfGameTestCase):
def get_config_file(self):
return 'config.yaml'
def get_platform(self):
return 'smart_virtual'
def get_machine_path(self):
return 'tests/machine_files/blinkenlight/'
def test_add_color_to_one_blinkenlight(self):
self.post_event('start_mode1')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.post_event('add_color_to_first_blinkenlight')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors')
def test_add_color_to_two_blinkenlights(self):
self.post_event('start_mode1')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.post_event('add_color_to_all_blinkenlights')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors')
def test_remove_color_from_one_blinkenlight(self):
self.post_event('start_mode1')
self.post_event('add_color_to_second_blinkenlight')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.post_event('remove_color_from_first_blinkenlight')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.post_event('remove_color_from_second_blinkenlight')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors')
def test_remove_all_colors_from_all_blinkenlights(self):
self.post_event('start_mode1')
self.post_event('start_mode2')
self.post_event('add_color_to_first_blinkenlight')
self.post_event('add_color_to_second_blinkenlight')
self.post_event('add_color_to_third_blinkenlight')
self.post_event('add_color_to_all_blinkenlights')
self.post_event('mode2_add_color_to_first_blinkenlight')
self.assertPlaceholderEvaluates(3, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight3.num_colors')
self.post_event('remove_all_colors_from_all_blinkenlights')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight3.num_colors')
def test_remove_mode_colors_from_one_blinkenlight(self):
self.post_event('start_mode1')
self.post_event('start_mode2')
self.post_event('add_color_to_first_blinkenlight')
self.post_event('mode2_add_color_to_first_blinkenlight')
self.post_event('mode2_add_color2_to_first_blinkenlight')
self.assertPlaceholderEvaluates(3, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.post_event('mode2_remove_mode_colors_from_first_blinkenlight')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors')
def test_remove_mode_colors_when_mode_ends(self):
self.post_event('start_mode1')
self.post_event('start_mode2')
self.post_event('add_color_to_first_blinkenlight')
self.post_event('add_color_to_second_blinkenlight')
self.post_event('mode2_add_color_to_first_blinkenlight')
self.post_event('mode2_add_color2_to_first_blinkenlight')
self.post_event('mode2_add_color_to_second_blinkenlight')
self.assertPlaceholderEvaluates(3, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.post_event('stop_mode2')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors')
def test_flashing_cycle(self):
self.post_event('start_mode1')
self.post_event('add_color_to_all_blinkenlights')
self.post_event('add_color_to_first_blinkenlight')
self.post_event('add_color_to_second_blinkenlight')
self.post_event('add_color_to_third_blinkenlight')
self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertPlaceholderEvaluates(2, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight3.num_colors')
blinkenlight1 = self.machine.blinkenlights['my_blinkenlight1']
blinkenlight2 = self.machine.blinkenlights['my_blinkenlight2']
blinkenlight3 = self.machine.blinkenlights['my_blinkenlight3']
blue = RGBColor('blue')
green = RGBColor('green')
red = RGBColor('red')
yellow = RGBColor('yellow')
purple = RGBColor('purple')
cyan = RGBColor('cyan')
off = RGBColor('off')
self.assertEqual(blue, blinkenlight1.light._color)
self.assertEqual(green, blinkenlight2.light._color)
self.assertEqual(purple, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(red, blinkenlight1.light._color)
self.assertEqual(green, blinkenlight2.light._color)
self.assertEqual(off, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(off, blinkenlight1.light._color)
self.assertEqual(yellow, blinkenlight2.light._color)
self.assertEqual(purple, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(blue, blinkenlight1.light._color)
self.assertEqual(yellow, blinkenlight2.light._color)
self.assertEqual(off, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(red, blinkenlight1.light._color)
self.assertEqual(green, blinkenlight2.light._color)
self.assertEqual(purple, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(off, blinkenlight1.light._color)
self.assertEqual(green, blinkenlight2.light._color)
self.assertEqual(off, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(blue, blinkenlight1.light._color)
self.assertEqual(yellow, blinkenlight2.light._color)
self.assertEqual(purple, blinkenlight3.light._color)
self.post_event("remove_color_from_third_blinkenlight")
self.advance_time_and_run(1)
self.assertEqual(red, blinkenlight1.light._color)
self.assertEqual(yellow, blinkenlight2.light._color)
self.assertEqual(off, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(off, blinkenlight1.light._color)
self.assertEqual(green, blinkenlight2.light._color)
self.assertEqual(off, blinkenlight3.light._color)
self.advance_time_and_run(1)
self.assertEqual(blue, blinkenlight1.light._color)
self.assertEqual(green, blinkenlight2.light._color)
self.assertEqual(off, blinkenlight3.light._color)
def test_priority_order(self):
self.post_event('start_mode1')
self.post_event('start_mode2')
blinkenlight1 = self.machine.blinkenlights['my_blinkenlight1']
red = RGBColor('red')
orange = RGBColor('orange')
off = RGBColor('off')
self.post_event('add_color_to_first_blinkenlight')
self.post_event('mode2_add_color_to_first_blinkenlight')
self.assertEqual(orange, blinkenlight1.light._color)
self.advance_time_and_run(1)
self.assertEqual(red, blinkenlight1.light._color)
self.advance_time_and_run(1)
self.assertEqual(off, blinkenlight1.light._color)
self.post_event('remove_all_colors_from_all_blinkenlights')
self.advance_time_and_run(1)
self.post_event('mode2_add_color_to_first_blinkenlight')
self.post_event('add_color_to_first_blinkenlight')
self.assertEqual(orange, blinkenlight1.light._color)
self.advance_time_and_run(1)
self.assertEqual(red, blinkenlight1.light._color)
self.advance_time_and_run(1)
self.assertEqual(off, blinkenlight1.light._color)
def test_replace_existing_color(self):
self.post_event('start_mode1')
blinkenlight1 = self.machine.blinkenlights['my_blinkenlight1']
blue = RGBColor('blue')
darkred = RGBColor('darkred')
off = RGBColor('off')
self.post_event('add_color_to_all_blinkenlights')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertEqual(blue, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.assertEqual(off, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.assertEqual(blue, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.assertEqual(off, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.post_event('add_color_to_first_blinkenlight_with_duplicate_key')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight1.num_colors')
self.assertEqual(darkred, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.assertEqual(off, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.assertEqual(darkred, blinkenlight1.light._color)
self.advance_time_and_run(1.5)
self.assertEqual(off, blinkenlight1.light._color)
def test_show_with_tokens(self):
self.post_event('start_mode2')
blinkenlight = self.machine.blinkenlights['my_blinkenlight2']
gray = RGBColor('gray')
off = RGBColor('off')
self.assertPlaceholderEvaluates(0, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.post_event('play_blinkenlight_token_show')
self.assertPlaceholderEvaluates(1, 'device.blinkenlights.my_blinkenlight2.num_colors')
self.assertEqual(gray, blinkenlight.light._color)
self.advance_time_and_run(2)
self.assertEqual(off, blinkenlight.light._color)
self.advance_time_and_run(2)
self.assertEqual(gray, blinkenlight.light._color)
self.advance_time_and_run(2)
self.assertEqual(off, blinkenlight.light._color)
| nilq/baby-python | python |
import mysql.connector
mydb = mysql.connector.connect(
host = 'localhost',
user = "root",
#passwd = "ant904",
database = "spl"
#auth_plugin='mysql_native_password'
)
myCursor = mydb.cursor()
qusTimeList=list()
qusTimeList.append("0:00:03")
qusTimeList.append("0:00:02")
qusTimeList.append("0:00:05")
qusTimeList.append("0:00:06")
qusTimeList.append("0:00:08")
qusTimeList.append("0:00:02")
qusTimeList.append("0:00:03")
qusTimeList.append("0:00:03")
qusTimeList.append("0:00:04")
qusTimeList.append("0:00:05")
qusTimeList.append("0:00:08")
gameTimeList=list()
gameTimeList.append("0:00:13")
gameTimeList.append("0:00:19")
gameTimeList.append("0:00:24")
gameTimeList.append("0:00:08")
gameTimeList.append("0:00:09")
gameTimeList.append("0:00:13")
gameTimeList.append("0:00:08")
gameTimeList.append("0:00:09")
gameTimeList.append("0:00:13")
gameTimeList.append("0:00:14")
gameTimeList.append("0:00:12")
#myCursor.execute("CREATE database test222")
sql="INSERT into controlGroup(questionTime , gameTime) VALUES (%s, %s)"
val=(qusTimeList[9],gameTimeList[9])
myCursor.execute(sql,val)
mydb.commit()
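# A sketch (not in the original script): to insert every (questionTime, gameTime)
# pair in one call, cursor.executemany could be used instead of indexing one row:
#
# myCursor.executemany(sql, list(zip(qusTimeList, gameTimeList)))
# mydb.commit()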
myCursor.close()
mydb.close()
| nilq/baby-python | python |
from toee import *
def OnBeginSpellCast( spell ):
print "Vampiric Touch OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-necromancy-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Vampiric Touch OnSpellEffect"
dice = dice_new("1d6")
dice.number = min(10, (spell.caster_level) / 2)
spell.duration = 600
target = spell.target_list[0]
if not (target.obj == spell.caster):
attack_successful = spell.caster.perform_touch_attack( target.obj , 1)
if attack_successful & D20CAF_HIT:
old_hp = target.obj.stat_level_get( stat_hp_current )
target.obj.spell_damage_weaponlike( spell.caster, D20DT_NEGATIVE_ENERGY, dice, D20DAP_UNSPECIFIED, 100, D20A_CAST_SPELL, spell.id, attack_successful, 0 )
new_hp = target.obj.stat_level_get( stat_hp_current )
damage = old_hp - new_hp
if damage > (old_hp + 10):
damage = old_hp + 10
#spell.caster.condition_add_with_args( 'Temporary_Hit_Points', spell.id, spell.duration, damage )
spell.caster.condition_add_with_args( 'sp-Vampiric Touch', spell.id, spell.duration, damage )
spell.caster.float_mesfile_line( 'mes\\spell.mes', 20005, 0 )
else:
#target.obj.float_mesfile_line( 'mes\\spell.mes', 30021 )
game.particles( 'Fizzle', target.obj )
spell.target_list.remove_target( target.obj )
game.particles( 'sp-Vampiric Touch', spell.caster )
def OnBeginRound( spell ):
print "Vampiric Touch OnBeginRound"
def OnEndSpellCast( spell ):
print "Vampiric Touch OnEndSpellCast" | nilq/baby-python | python |
from django.contrib import admin
from .models import District, Quarter, Community
admin.site.register(District)
admin.site.register(Quarter)
admin.site.register(Community) | nilq/baby-python | python |
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
# pylint: disable=W1203,C0411,C0413,no-value-for-parameter
import argparse
import json
import logging
import os
import subprocess
import sys
import tempfile
from mmcv.utils import Config
import yaml
from eval import main as evaluate
sys.path.append(f'{os.path.abspath(os.path.dirname(__file__))}/../../')
from tools.misc import train, get_work_dir
def parse_args():
""" Parses input args. """
parser = argparse.ArgumentParser()
parser.add_argument('config',
help='A path to model training configuration file (.py).')
parser.add_argument('gpu_num', type=int,
help='A number of GPUs to use in training.')
parser.add_argument('out',
help='A path to output file where models metrics will be saved (.yml).')
parser.add_argument('--update_config',
help='Update configuration file by parameters specified here.'
'Use quotes if you are going to change several params.',
default='')
parser.add_argument('--show-dir', '--show_dir', dest='show_dir',
help='A directory where images with drawn detected objects will be saved.')
return parser.parse_args()
def is_clustering_needed(cfg):
if cfg.total_epochs > 0:
return False
if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead':
return False
if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered':
return False
return True
def cluster(cfg, config_path, update_config):
mmdetection_tools = f'{os.path.dirname(__file__)}/../../../../external/mmdetection/tools'
logging.info('Clustering started...')
widths = cfg.model.bbox_head.anchor_generator.widths
n_clust = 0
for w in widths:
n_clust += len(w) if isinstance(w, (list, tuple)) else 1
n_clust = ' --n_clust ' + str(n_clust)
group_as = ''
if isinstance(widths[0], (list, tuple)):
group_as = ' --group_as ' + ' '.join([str(len(w)) for w in widths])
config = ' --config ' + config_path
tmp_file = tempfile.NamedTemporaryFile(delete=False)
out = f' --out {tmp_file.name}'
if 'pipeline' in cfg.data.train:
img_shape = [t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][
'img_scale']
else:
img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][
'img_scale']
img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}'
subprocess.run(f'python {mmdetection_tools}/cluster_boxes.py'
f'{config}'
f'{n_clust}'
f'{group_as}'
f'{update_config}'
f'{img_shape}'
f'{out}'.split(' '), check=True)
with open(tmp_file.name) as src_file:
content = json.load(src_file)
widths, heights = content['widths'], content['heights']
if not update_config:
update_config = ' --update_config'
update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(" ", "")}'
update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(" ", "")}'
logging.info('... clustering completed.')
return update_config
def main():
""" Main function. """
logging.basicConfig(level=logging.INFO)
args = parse_args()
logging.info(f'Commandline:\n{" ".join(sys.argv)}')
cfg = Config.fromfile(args.config)
update_config = f' --update_config {args.update_config}' if args.update_config else ''
if is_clustering_needed(cfg):
update_config = cluster(cfg, args.config, update_config)
logging.info('Training started ...')
training_info = train(args.config, args.gpu_num, update_config)
logging.info('... training completed.')
work_dir = get_work_dir(cfg, args.update_config)
logging.info('Evaluation started ...')
evaluate(os.path.join(work_dir, "config.py"), os.path.join(work_dir, "latest.pth"), args.out, '', args.show_dir)
logging.info('... evaluation completed.')
with open(args.out, 'a+') as dst_file:
yaml.dump(training_info, dst_file)
if __name__ == '__main__':
main()
| nilq/baby-python | python |
import os
import codecs
from io import StringIO
from pytest import fixture
from rave import filesystem
class DummyProvider:
def __init__(self, files):
self.files = files;
def list(self):
return self.files
def has(self, filename):
return filename in self.list()
def open(self, filename, *args, **kwargs):
if not self.has(filename):
raise filesystem.FileNotFound(filename)
if not self.isfile(filename):
raise filesystem.NotAFile(filename)
return DummyFile(self, filename)
def isfile(self, filename):
return self.has(filename) and '.' in filename
def isdir(self, filename):
return self.has(filename) and not self.isfile(filename)
class FaultyProvider(DummyProvider):
def __init__(self, files, faulty_files, err=filesystem.FileNotFound):
super().__init__(files)
self.faulty_files = faulty_files
self.error_class = err
def open(self, filename, *args, **kwargs):
if filename in self.faulty_files:
raise self.error_class(filename)
return super().open(filename, *args, **kwargs)
class DummyFile(filesystem.File):
def __init__(self, parent, filename, content='merry saltmas'):
self.parent = parent
self.filename = filename
self._buffer = StringIO(content)
self._closed = False
def close(self):
if self._closed:
raise filesystem.FileClosed(self.filename)
self._closed = True
def opened(self):
return not self._closed
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
def read(self, amount=None):
if self.closed:
raise filesystem.FileClosed(self.filename)
return self._buffer.read(amount)
def write(self, buffer):
if self.closed:
raise filesystem.FileClosed(self.filename)
return self._buffer.write(buffer)
def seek(self, offset, mode=os.SEEK_CUR):
return self._buffer.seek(offset, mode)
def tell(self):
return self._buffer.tell()
class DummyTransformer:
CONSUME = False
RELATIVE = False
def __init__(self, filename, handle):
self.filename = filename
self.handle = handle
self.files = [ self.filename + '.rot13' ]
def list(self):
return self.files
def has(self, filename):
return filename in self.list()
def open(self, filename, *args, **kwargs):
if not self.has(filename):
raise filesystem.FileNotFound(filename)
return ROT13File(self, filename, self.handle)
def isfile(self, filename):
return self.has(filename)
def isdir(self, filename):
return False
def relative(self):
return self.RELATIVE
def consumes(self):
return self.CONSUME
def valid(self):
return True
class FaultyTransformer:
def __init__(self, filename, handle):
        raise filesystem.FileNotFound(filename)
class InvalidTransformer(DummyTransformer):
def valid(self):
return False
class ROT13File(filesystem.File):
def __init__(self, parent, filename, handle):
self.parent = parent
self.filename = filename
self.handle = handle
def close(self):
return self.handle.close()
def opened(self):
return self.handle.opened()
def readable(self):
return self.handle.readable()
def writable(self):
return self.handle.writable()
def seekable(self):
return self.handle.seekable()
def read(self, amount=None):
return codecs.encode(self.handle.read(amount), 'rot13')
def write(self, buffer):
return self.handle.write(codecs.encode(buffer, 'rot13'))
def seek(self, offset, mode=os.SEEK_CUR):
return self.handle.seek(offset, mode)
def tell(self):
return self.handle.tell()
@fixture
def fs():
return filesystem.FileSystem()
@fixture
def dummyfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' }))
return fs
@fixture
def nestedfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/y', '/y/c.txt', '/y/p.png', '/y/z' }))
return fs
@fixture
def parentlessfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/z/k.txt' }))
return fs
@fixture
def doublefs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' }))
fs.mount('/y', DummyProvider({ '/c.exe', '/d.jpg' }))
return fs
@fixture
def mergedfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' }))
fs.mount('/x', DummyProvider({ '/c.exe', '/d.jpg' }))
return fs
@fixture
def transfs():
fs = dummyfs()
fs.transform('\.txt$', DummyTransformer)
return fs
| nilq/baby-python | python |
"""Forward measurements from Xiaomi Mi plant sensor via MQTT.
See https://github.com/ChristianKuehnel/plantgateway for more details.
"""
##############################################
#
# This is open source software licensed under the Apache License 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
##############################################
from enum import Enum
import os
import logging
import json
import time
from datetime import datetime
from typing import List, Optional
import yaml
import paho.mqtt.client as mqtt
from miflora.miflora_poller import MiFloraPoller, MI_BATTERY, MI_LIGHT, MI_CONDUCTIVITY, MI_MOISTURE, MI_TEMPERATURE, MI_FWVERSION
from btlewrap.bluepy import BluepyBackend
from plantgw import __version__
class MQTTAttributes(Enum):
"""Attributes sent in the json dict."""
BATTERY = 'battery'
TEMPERATURE = 'temperature'
BRIGHTNESS = 'brightness'
MOISTURE = 'moisture'
CONDUCTIVITY = 'conductivity'
TIMESTAMP = 'timestamp'
FIRMWARE = 'firmware'
# unit of measurement for the different attributes
UNIT_OF_MEASUREMENT = {
MQTTAttributes.BATTERY: '%',
MQTTAttributes.TEMPERATURE: '°C',
MQTTAttributes.BRIGHTNESS: 'lux',
MQTTAttributes.MOISTURE: '%',
MQTTAttributes.CONDUCTIVITY: 'µS/cm',
MQTTAttributes.TIMESTAMP: 's',
MQTTAttributes.FIRMWARE: '',
}
# home assistant device classes for the different attributes
DEVICE_CLASS = {
MQTTAttributes.BATTERY: 'battery',
MQTTAttributes.TEMPERATURE: 'temperature',
MQTTAttributes.BRIGHTNESS: 'illuminance',
MQTTAttributes.MOISTURE: None,
MQTTAttributes.CONDUCTIVITY: None,
MQTTAttributes.TIMESTAMP: 'timestamp',
MQTTAttributes.FIRMWARE: None,
}
# pylint: disable-msg=too-many-instance-attributes
class Configuration:
"""Stores the program configuration."""
def __init__(self, config_file_path):
with open(config_file_path, 'r') as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
self._configure_logging(config)
self.interface = 0
if 'interface' in config:
self.interface = config['interface']
self.mqtt_port = 8883 # type: int
self.mqtt_user = None # type: Optional[str]
self.mqtt_password = None # type: Optional[str]
self.mqtt_ca_cert = None # type: Optional[str]
self.mqtt_client_id = None # type: Optional[str]
self.mqtt_trailing_slash = True # type:bool
self.mqtt_timestamp_format = None # type: Optional[str]
self.mqtt_discovery_prefix = None # type: Optional[str]
self.sensors = [] # type: List[SensorConfig]
if 'port' in config['mqtt']:
self.mqtt_port = config['mqtt']['port']
if 'user' in config['mqtt']:
self.mqtt_user = config['mqtt']['user']
if 'password' in config['mqtt']:
self.mqtt_password = config['mqtt']['password']
if 'ca_cert' in config['mqtt']:
self.mqtt_ca_cert = config['mqtt']['ca_cert']
if 'client_id' in config['mqtt']:
self.mqtt_client_id = config['mqtt']['client_id']
if 'trailing_slash' in config['mqtt'] and not config['mqtt']['trailing_slash']:
self.mqtt_trailing_slash = False
if 'timestamp_format' in config['mqtt']:
self.mqtt_timestamp_format = config['mqtt']['timestamp_format']
self.mqtt_server = config['mqtt']['server']
self.mqtt_prefix = config['mqtt']['prefix']
for sensor_config in config['sensors']:
fail_silent = 'fail_silent' in sensor_config
self.sensors.append(SensorConfig(sensor_config['mac'], sensor_config.get('alias', None), fail_silent, sensor_config.get('cache_timeout', 600), sensor_config.get('cache_retries', 3)))
if 'discovery_prefix' in config['mqtt']:
self.mqtt_discovery_prefix = config['mqtt']['discovery_prefix']
@staticmethod
def _configure_logging(config):
timeform = '%a, %d %b %Y %H:%M:%S'
logform = '%(asctime)s %(levelname)-8s %(message)s'
loglevel = logging.INFO
if 'debug' in config:
loglevel = logging.DEBUG
if 'logfile' in config:
logfile = os.path.abspath(os.path.expanduser(config['logfile']))
logging.basicConfig(filename=logfile, level=loglevel, datefmt=timeform, format=logform)
else:
logging.basicConfig(level=loglevel, datefmt=timeform, format=logform)
class SensorConfig:
"""Stores the configuration of a sensor."""
def __init__(self, mac: str, alias: str = None, fail_silent: bool = False, cache_timeout: int = 600, cache_retries: int = 3):
if mac is None:
msg = 'mac of sensor must not be None'
logging.error(msg)
raise Exception('mac of sensor must not be None')
self.mac = mac
self.alias = alias
self.fail_silent = fail_silent
self.cache_timeout = cache_timeout
self.cache_retries = cache_retries
def get_topic(self) -> str:
"""Get the topic name for the sensor."""
if self.alias is not None:
return self.alias
return '0x' + self.short_mac
def __str__(self) -> str:
if self.alias:
result = self.alias
else:
result = self.mac
if self.fail_silent:
result += ' (fail silent)'
return result
@property
def short_mac(self):
"""Get the sensor mac without ':' in it."""
return self.mac.replace(':', '')
@staticmethod
def get_name_string(sensor_list) -> str:
"""Convert a list of sensor objects to a nice string."""
return ', '.join([str(sensor) for sensor in sensor_list])
class PlantGateway:
"""Main class of the module."""
def __init__(self, config_file_path: str = '~/.plantgw.yaml'):
config_file_path = os.path.abspath(os.path.expanduser(config_file_path))
self.config = Configuration(config_file_path) # type: Configuration
logging.info('PlantGateway version %s', __version__)
logging.info('loaded config file from %s', config_file_path)
self.mqtt_client = None
self.connected = False # type: bool
def start_client(self):
"""Start the mqtt client."""
if not self.connected:
self._start_client()
def stop_client(self):
"""Stop the mqtt client."""
if self.connected:
self.mqtt_client.disconnect()
self.connected = False
self.mqtt_client.loop_stop()
logging.info('Disconnected MQTT connection')
def _start_client(self):
self.mqtt_client = mqtt.Client(self.config.mqtt_client_id)
if self.config.mqtt_user is not None:
self.mqtt_client.username_pw_set(self.config.mqtt_user, self.config.mqtt_password)
if self.config.mqtt_ca_cert is not None:
self.mqtt_client.tls_set(self.config.mqtt_ca_cert, cert_reqs=mqtt.ssl.CERT_REQUIRED)
def _on_connect(client, _, flags, return_code):
self.connected = True
logging.info("MQTT connection returned result: %s", mqtt.connack_string(return_code))
self.mqtt_client.on_connect = _on_connect
self.mqtt_client.connect(self.config.mqtt_server, self.config.mqtt_port, 60)
self.mqtt_client.loop_start()
def _publish(self, sensor_config: SensorConfig, poller: MiFloraPoller):
self.start_client()
state_topic = self._get_state_topic(sensor_config)
data = {
MQTTAttributes.BATTERY.value: poller.parameter_value(MI_BATTERY),
MQTTAttributes.TEMPERATURE.value: '{0:.1f}'.format(poller.parameter_value(MI_TEMPERATURE)),
MQTTAttributes.BRIGHTNESS.value: poller.parameter_value(MI_LIGHT),
MQTTAttributes.MOISTURE.value: poller.parameter_value(MI_MOISTURE),
MQTTAttributes.CONDUCTIVITY.value: poller.parameter_value(MI_CONDUCTIVITY),
MQTTAttributes.FIRMWARE.value: poller.parameter_value(MI_FWVERSION),
MQTTAttributes.TIMESTAMP.value: datetime.now().isoformat(),
}
for key, value in data.items():
logging.debug("%s: %s", key, value)
if self.config.mqtt_timestamp_format is not None:
data['timestamp'] = datetime.now().strftime(self.config.mqtt_timestamp_format)
json_payload = json.dumps(data)
self.mqtt_client.publish(state_topic, json_payload, qos=1, retain=True)
logging.info('sent data to topic %s', state_topic)
logging.info('payload: %s', data)
def _get_state_topic(self, sensor_config: SensorConfig) -> str:
prefix_fmt = '{}/{}'
if self.config.mqtt_trailing_slash:
prefix_fmt += '/'
prefix = prefix_fmt.format(self.config.mqtt_prefix,
sensor_config.get_topic())
return prefix
def process_mac(self, sensor_config: SensorConfig):
"""Get data from one Sensor."""
logging.info('Getting data from sensor %s', sensor_config.get_topic())
poller = MiFloraPoller(sensor_config.mac, BluepyBackend, sensor_config.cache_timeout, sensor_config.cache_retries)
self.announce_sensor(sensor_config)
self._publish(sensor_config, poller)
def process_all(self):
"""Get data from all sensors."""
next_list = self.config.sensors
timeout = 1 # initial timeout in seconds
max_retry = 6 # number of retries
retry_count = 0
while retry_count < max_retry and next_list:
# if this is not the first try: wait some time before trying again
if retry_count > 0:
logging.info('try %d of %d: could not process sensor(s) %s. Waiting %d sec for next try',
retry_count, max_retry, SensorConfig.get_name_string(next_list), timeout)
time.sleep(timeout)
timeout *= 2 # exponential backoff-time
current_list = next_list
retry_count += 1
next_list = []
for sensor in current_list:
try:
self.process_mac(sensor)
# pylint: disable=bare-except, broad-except
except Exception as exception:
next_list.append(sensor) # if it failed, we'll try again in the next round
msg = "could not read data from {} ({}) with reason: {}".format(
sensor.mac, sensor.alias, str(exception))
if sensor.fail_silent:
logging.error(msg)
logging.warning('fail_silent is set for sensor %s, so not raising an exception.', sensor.alias)
else:
logging.exception(msg)
# print(msg)
# return sensors that could not be processed after max_retry
return next_list
def announce_sensor(self, sensor_config: SensorConfig):
"""Announce the sensor via Home Assistant MQTT Discovery.
see https://www.home-assistant.io/docs/mqtt/discovery/
"""
if self.config.mqtt_discovery_prefix is None:
return
self.start_client()
self_name = 'plantgateway'
device_name = '0x{}'.format(sensor_config.short_mac)
for attribute in MQTTAttributes:
unique_id = '{}_{}_{}'.format(self_name, device_name, attribute.value)
topic = '{}/sensor/{}_{}/{}/config'.format(self.config.mqtt_discovery_prefix, self_name, device_name, attribute.value)
payload = {
'state_topic': self._get_state_topic(sensor_config),
'json_attributes_topic': self._get_state_topic(sensor_config),
'unit_of_measurement': UNIT_OF_MEASUREMENT[attribute],
'value_template': '{{value_json.'+attribute.value+'}}',
'unique_id': unique_id,
'device': {
'identifiers': [ '{}_{}'.format(self_name, device_name), ],
'name': device_name,
'sw_version': 'plantgw dev',
'model': "MiFlora compatible plant humidity, brightness, conductivity, temperature sensor",
                    'manufacturer': 'to be determined',
}
}
if sensor_config.alias is not None:
payload['name'] = '{}_{}'.format(sensor_config.alias, attribute.value)
else:
payload['name'] = '{}_{}'.format(device_name, attribute.value)
if DEVICE_CLASS[attribute] is not None:
payload['device_class'] = DEVICE_CLASS[attribute]
json_payload = json.dumps(payload)
self.mqtt_client.publish(topic, json_payload, qos=1, retain=False)
logging.info('sent sensor config to topic %s', topic)
logging.info('payload: %s', payload)
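# Example (illustrative): for a sensor with mac C4:7C:8D:AA:BB:CC and the
# temperature attribute, the config above is published to
# "<discovery_prefix>/sensor/plantgateway_0xC47C8DAABBCC/temperature/config".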
| nilq/baby-python | python |
#
# This file contains the Python code from Program 6.2 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm06_02.txt
#
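# Note: Stack and Array are classes from the book's accompanying library and
# are assumed to be imported/defined elsewhere; this excerpt is not standalone.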
class StackAsArray(Stack):
def __init__(self, size = 0):
super(StackAsArray, self).__init__()
self._array = Array(size)
def purge(self):
while self._count > 0:
            self._count -= 1
            self._array[self._count] = None
#...
| nilq/baby-python | python |
print('----->DESAFIO 48<-----')
print('I will show you the sum of all odd numbers that are multiples of 3 in the range from 1 to 500!')
soma = 0
for c in range(0, 501):
if c > 0 and c % 2 != 0 and c % 3 == 0:
soma += c
print(soma)
| nilq/baby-python | python |
#################################################################
# Name: randDLA.py #
# Authors: Michael Battaglia #
# Function: Program simulates diffusion limited aggregation #
# using Monte Carlo Methods. #
#################################################################
#essential modules
import numpy as np
import matplotlib.pyplot as plt
#function: 2D diffusion limited random walk
def randDLA(lims, sink=False, source=False, periodic=True, N=False):
"""
sink: position vector for aggregation sink
if False, then boundary is sink
source: position vector for particle source
if False, then particles randomly appear on Free spaces
lims: vector of dimension lengths
if False, then periodic boundaries
N: number of participating particles
if False, then spawn particles until source or boundary is taken
"""
if sink is False:
if periodic:
#there will be no aggregate
print("No aggregate can form")
return float("NaN")
if N is False:
if source is False:
if not periodic:
#aggregate will never end
print("No end condition for aggregate")
return float("NaN")
#initialize list of occupied positions
occupied_pos = []
anchored=np.zeros(lims,dtype=int)
#generate particles
generate = True
while generate:
if source:
#specified source
            pos = np.array(source, dtype=int)
else:
#random source particle
pos = np.array([np.random.randint(0,lim) for lim in lims],dtype=int)
if not anchored[pos[0]][pos[1]]:
#take each step if position is not in a stuck position
while not isStuck(pos, lims, sink, periodic, anchored):
#take a random step in a random direction with a random orientation
step = np.zeros(len(pos))
step[np.random.randint(0,len(pos))] = 1-2*np.random.randint(0,2)
pos = pos + step
#impose periodic boundary
pos = np.mod(pos,lims).astype(int)
if len(occupied_pos)==0:
print("Position:", pos)
occupied_pos.append(pos)
anchored[pos[0],pos[1]] = 1
print("Anchored:",len(occupied_pos))
print("Anchor pos:",pos)
if N:
#generate until N particles
if len(occupied_pos) == N:
#generated N particles
generate = False
else:
#generate until
if source:
#source covered triggers end
if all(pos == source):
#occupied source
generate = False
if periodic:
#boundary covered triggers end
if any(pos==lims-1) or any(pos==0):
#occupied boundary
generate = False
#return list of occupied positions
return anchored, np.array(occupied_pos)
#function: check if particle is stuck (to edge, or other particle)
def isStuck(pos, lims, sink, periodic, anchored):
xp = pos[0]
yp = pos[1]
if not periodic:
#not periodic, gets stuck on wall
if any(pos==lims-1) or any(pos==0):
#if the particle has reached a wall
return True
if all(pos==sink):
#if particle hits sink
return True
if anchored[xp-1:xp+2,yp-1:yp+2].any():
#if particle is adjacent to an anchored particle
return True
else:
#particle is free
return False
#function: animated plot of 2D random walk
def D2plot(pos, animate=0.01):
if animate:
for i in range(len(pos)):
plt.cla()
plt.title('diffusion limited aggregation')
plt.scatter(pos.T[0][:i+1],pos.T[1][:i+1])
plt.xlabel('x position')
plt.ylabel('y position')
plt.draw()
plt.pause(animate)
else:
plt.title('diffusion limited aggregation')
plt.scatter(pos.T[0],pos.T[1])
plt.xlabel('x position')
plt.ylabel('y position')
plt.show()
#function: evaluate fractal dimension
def fracDim(image):
print(image.shape)
    cen = (np.array([image.shape[0], image.shape[1]]) - 1) // 2  # integer centre so it can be used for indexing
r = range(1, min(cen)+1)
m = np.zeros(len(r))
for i in range(len(r)):
subimage = image[cen[0]-r[i]:cen[0]+r[i]+1,cen[1]-r[i]:cen[1]+r[i]+1]
m[i] = subimage.sum()
plt.title("fractal dimension")
plt.plot(r,m)
plt.ylabel('mass')
plt.xlabel('radius')
plt.yscale('log')
plt.xscale('log')
plt.show()
#function: main
if __name__ == '__main__':
#size of box
L = np.array([201,201])
#central position
    central = (L - 1) // 2
#take a random walk until aggregation reaches edge
image, pos = randDLA(L, central)
#plot path
D2plot(pos, animate=True)
#plot fractal dimension
fracDim(image)
| nilq/baby-python | python |
#import
import random
import os
import numpy
dic = {}
with open("points3D.txt","r") as n:
for line in n:
a = line.split(" ")
temp = []
temp.append(float(a[1]))
temp.append(float(a[2]))
temp.append(float(a[3]))
dic[a[0]] = temp[:]
print(dic["1"])
#end
| nilq/baby-python | python |
def getone(coll, key, default=None):
try:
value = coll[key]
    except (IndexError, KeyError, TypeError):
        return default
    else:
        return value
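# Illustrative usage (example values assumed, not from the original source):
#   getone([1, 2, 3], 10, default=0)  -> 0    (IndexError is swallowed)
#   getone({"a": 1}, "b")             -> None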
| nilq/baby-python | python |
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import itertools
import json
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPTS_DIR = os.path.dirname(SCRIPT_DIR)
FUCHSIA_ROOT = os.path.dirname(SCRIPTS_DIR)
# The maximum number of size percentage points a binary is allowed to drop.
# A greater amount will raise a flag.
MAX_SIZE_DECREASE = 10
# The maximum number of size percentage points a binary is allowed to gain.
# A greater amount will raise a flag.
MAX_SIZE_INCREASE = 1
class Type(object):
AUX = 'aux'
IMAGE = 'image'
TESTS = 'tests'
@classmethod
def all(cls): return [cls.AUX, cls.IMAGE, cls.TESTS]
class Origin(object):
LEGACY = 'legacy'
MIGRATED = 'migrated'
@classmethod
def all(cls): return [cls.LEGACY, cls.MIGRATED]
class Manifest(object):
'''Lists the contents of a manifest file'''
def __init__(self, origin, type, contents):
self.origin = origin
self.type = type
self.contents = contents
def __repr__(self):
return 'M[%s-%s]' % (self.origin, self.type)
class CustomJSONEncoder(json.JSONEncoder):
'''A JSON encoder that handles sets and sorts lists.'''
def default(self, object):
if isinstance(object, FileDataSet) or isinstance(object, FileData):
return object.to_json()
return json.JSONEncoder.default(self, object)
class FileData(object):
'''Represents a file referred to in a manifest.'''
def __init__(self, path, size=None):
self.path = path
self.size = size if size else os.path.getsize(path)
def __eq__(self, other):
return self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
        return hash(self.path)
def __repr__(self):
return 'F[' + self.path + ']'
def to_json(self):
return {
'path': self.path,
'size': self.size,
}
@classmethod
def from_json(cls, input):
return FileData(input['path'], input['size'])
class FileDataSet(object):
'''Represents a set of files.'''
def __init__(self):
# map { name --> FileData }
self.files = {}
def add(self, name, file):
if name == 'lib/libdriver.so':
# libdriver is a complicated hydra whose many heads we don't need to
# worry about here.
return
if name in self.files and file != self.files[name]:
print('Error: different file under path ' + name + ':')
print(' - ' + str(file))
print(' - ' + str(self.files[name]))
return
self.files[name] = file
def filenames(self):
return set(self.files.keys())
def get_file(self, name):
return self.files[name]
def __len__(self):
return len(self.files)
def to_json(self):
return self.files
@classmethod
def from_json(cls, input):
result = FileDataSet()
for name, data in input.iteritems():
result.add(name, FileData.from_json(data))
return result
class Summary(object):
'''Data for a particular state of the build.'''
def __init__(self):
# map { type --> FileDataSet }
self.objects = {}
def add_objects(self, type, objects):
dataset = self.objects.setdefault(type, FileDataSet())
for name, path in objects.iteritems():
dataset.add(name, FileData(path))
def get_objects(self, type):
return self.objects[type]
def __repr__(self):
items = ['%s=%s' % (t, len(o)) for (t, o) in self.objects.iteritems()]
return 'S[' + ', '.join(items) + ']'
def to_json(self, output):
json.dump(self.objects, output, cls=CustomJSONEncoder, indent=2,
sort_keys=True, separators=(',', ': '))
@classmethod
def from_json(cls, input):
result = Summary()
data = json.load(input)
for type in Type.all():
result.objects[type] = FileDataSet.from_json(data[type])
return result
def generate_summary(manifests, base_dir):
'''Generates a summary based on the manifests found in the build.'''
result = Summary()
for type in Type.all():
for manifest in filter(lambda m: m.type == type, manifests):
contents = manifest.contents.copy()
contents = dict([(n, os.path.join(base_dir, p))
for (n, p) in contents.iteritems()])
result.add_objects(type, contents)
return result
def report(manifest, is_error, message):
type = 'Error' if is_error else 'Warning'
print('%s%s%s' % (type.ljust(10), manifest.ljust(8), message))
def compare_summaries(reference, current):
'''Compares summaries for two states of the build.'''
match = True
for type in Type.all():
reference_objects = reference.get_objects(type)
current_objects = current.get_objects(type)
reference_names = reference_objects.filenames()
current_names = current_objects.filenames()
# Missing and new files.
if reference_names != current_names:
match = False
removed = reference_names - current_names
if removed:
for element in removed:
report(type, True, 'element removed: ' + element)
added = current_names - reference_names
if added:
for element in added:
                    report(type, True, 'element added: ' + element)
# Size changes.
for name in reference_names & current_names:
reference_size = reference_objects.get_file(name).size
current_size = current_objects.get_file(name).size
if current_size == reference_size:
continue
is_diff_positive = current_size > reference_size
diff_percentage = 100 * (current_size - reference_size) / reference_size
is_error = False
if (diff_percentage < -MAX_SIZE_DECREASE or
diff_percentage > MAX_SIZE_INCREASE):
match = False
is_error = True
report(type, is_error, 'size change for ' + name + ': ' +
('+' if is_diff_positive else '-') +
str(abs(diff_percentage)) + '%')
return match
def main():
parser = argparse.ArgumentParser(
description='Performs verifications after moving an element from '
'ZN to GN.')
parser.add_argument('--build-dir',
help='path to the GN build dir',
default=os.path.join(FUCHSIA_ROOT, 'out', 'default'))
parser.add_argument('--summary',
help='path to the summary file to generate')
parser.add_argument('--reference',
help='path to the summary file to compare against')
args = parser.parse_args()
if not args.summary and not args.reference:
print('At least one of --summary or --reference needs to be set.')
parser.print_help()
return 1
# Load up manifests from the current build.
manifests = []
for origin in Origin.all():
for type in Type.all():
path = os.path.join(args.build_dir, 'obj', 'build', 'unification',
'images',
'%s-%s.unification.manifest' % (origin, type))
with open(path, 'r') as manifest_file:
contents = dict(map(lambda line: line.strip().split('=', 1),
manifest_file.readlines()))
manifests.append(Manifest(origin, type, contents))
# Generate a summary for the current build.
summary = generate_summary(manifests, args.build_dir)
# If applicable, save the current build's summary.
if args.summary:
dirname = os.path.dirname(args.summary)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(args.summary, 'w') as output_file:
summary.to_json(output_file)
# If applicable, compare the current summary to a previously-saved one.
if args.reference:
with open(args.reference, 'r') as input_file:
reference = Summary.from_json(input_file)
if not compare_summaries(reference, summary):
print('Error: summaries do not match!')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| nilq/baby-python | python |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create renderer stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
cow = vtk.vtkBYUReader()
cow.SetGeometryFileName("" + str(VTK_DATA_ROOT) + "/Data/Viewpoint/cow.g")
cowMapper = vtk.vtkPolyDataMapper()
cowMapper.SetInputConnection(cow.GetOutputPort())
cowActor = vtk.vtkActor()
cowActor.SetMapper(cowMapper)
cowActor.GetProperty().SetDiffuseColor(0.9608,0.8706,0.7020)
cowAxesSource = vtk.vtkAxes()
cowAxesSource.SetScaleFactor(10)
cowAxesSource.SetOrigin(0,0,0)
cowAxesMapper = vtk.vtkPolyDataMapper()
cowAxesMapper.SetInputConnection(cowAxesSource.GetOutputPort())
cowAxes = vtk.vtkActor()
cowAxes.SetMapper(cowAxesMapper)
ren1.AddActor(cowAxes)
cowAxes.VisibilityOff()
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(cowActor)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(320,240)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(0)
ren1.GetActiveCamera().Dolly(1.4)
ren1.ResetCameraClippingRange()
cowAxes.VisibilityOn()
renWin.Render()
# render the image
#
# prevent the tk window from showing up then start the event loop
#
def RotateX (__vtk__temp0=0,__vtk__temp1=0):
cowActor.SetOrientation(0,0,0)
ren1.ResetCameraClippingRange()
renWin.Render()
renWin.Render()
renWin.EraseOff()
i = 1
while i <= 6:
cowActor.RotateX(60)
renWin.Render()
renWin.Render()
i = i + 1
renWin.EraseOn()
def RotateY (__vtk__temp0=0,__vtk__temp1=0):
cowActor.SetOrientation(0,0,0)
ren1.ResetCameraClippingRange()
renWin.Render()
renWin.Render()
renWin.EraseOff()
i = 1
while i <= 6:
cowActor.RotateY(60)
renWin.Render()
renWin.Render()
i = i + 1
renWin.EraseOn()
def RotateZ (__vtk__temp0=0,__vtk__temp1=0):
cowActor.SetOrientation(0,0,0)
ren1.ResetCameraClippingRange()
renWin.Render()
renWin.Render()
renWin.EraseOff()
i = 1
while i <= 6:
cowActor.RotateZ(60)
renWin.Render()
renWin.Render()
i = i + 1
renWin.EraseOn()
def RotateXY (__vtk__temp0=0,__vtk__temp1=0):
cowActor.SetOrientation(0,0,0)
cowActor.RotateX(60)
ren1.ResetCameraClippingRange()
renWin.Render()
renWin.Render()
renWin.EraseOff()
i = 1
while i <= 6:
cowActor.RotateY(60)
renWin.Render()
renWin.Render()
i = i + 1
renWin.EraseOn()
RotateX()
RotateY()
RotateZ()
RotateXY()
renWin.EraseOff()
# --- end of script --
| nilq/baby-python | python |
from collections import defaultdict
with open('day10/input.txt', 'r') as file:
data = sorted([int(x.strip()) for x in file.readlines()])
data = [0] + data
data.append(data[-1] + 3)
jolt_1, jolt_3 = 0, 0
for i in range(len(data)):
current = data[i - 1]
if (data[i] - current) == 1:
jolt_1 += 1
elif (data[i] - current) == 3:
jolt_3 += 1
jumps = [1, 2, 3]
routes = defaultdict(int) # default value is 0
routes[0] = 1
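# dynamic programming: routes[i] counts the distinct adapter arrangements that reach joltage i,
# i.e. the sum of the counts for i-1, i-2 and i-3 (the allowed backward jumps)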
for i in data[1:]:
routes[i] = sum([routes[i - j] for j in jumps])
print(f"Result 1: {jolt_1 * jolt_3}\nResult 2: {routes[data[-1]]}") | nilq/baby-python | python |
from django.contrib.auth.hashers import make_password
from rest_framework import serializers
from .models import User
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework import response, status
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'email', 'password')
def validate_password(self, value: str) -> str:
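        # DRF calls validate_<field_name> hooks during validation, so the plain-text
        # password is hashed with make_password before the user instance is created.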
return make_password(value)
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super(MyTokenObtainPairSerializer, cls).get_token(user)
return token
class UsersSerializers(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'phone_number')
class UserUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'email', 'phone_number')
| nilq/baby-python | python |
import tensorflow as tf
# Run the GPU build of TensorFlow on a specific GPU
GPU_INDEX = 2
tf.config.set_soft_device_placement(True)
tf.debugging.set_log_device_placement(True)
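# Note: setting CUDA_VISIBLE_DEVICES to "-1" hides every GPU from TensorFlow, so with soft
# device placement enabled the code below will silently fall back to the CPU even though
# /device:GPU:2 is requested further down.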
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
print(gpus)
print(logical_gpus)
except RuntimeError as e:
print(e)
try:
    with tf.device('/device:GPU:{}'.format(GPU_INDEX)):  # select the GPU by its index
# MNIST
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test)
except RuntimeError as e:
print(e)
| nilq/baby-python | python |
import pytest
from package_one.module_one import IntegerAdder
@pytest.fixture
def adder():
print("Test set-up!")
yield IntegerAdder()
print("Test tear-down")
def test_integer_adder(adder):
assert adder.add(1, 2) == 3
"""
If you'd like a fixture that is created only once per module, declare it like this:
@pytest.fixture(scope="module")
"""
@pytest.mark.parametrize(
"operand_one, operand_two, expected_result",
[
(1, 2, 3),
(10, 20, 30),
(-5, -10, -15)
]
)
def test_integer_adder_complex(
adder, operand_one, operand_two, expected_result
):
assert adder.add(operand_one, operand_two) == expected_result
| nilq/baby-python | python |
def snail(array):
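    # Traverses a 2D list in clockwise spiral ("snail") order: take the top row, then the last
    # element of every remaining row, then the bottom row reversed, then the first element of
    # every remaining row, and repeat until the matrix is consumed.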
results = []
while len(array) > 0:
results += array[0]
del array[0]
if len(array) > 0:
for i in array:
results += [i[-1]]
del i[-1]
if array[-1]:
results += array[-1][::-1]
del array[-1]
for i in reversed(array):
results += [i[0]]
del i[0]
return results
| nilq/baby-python | python |
import os
from google.appengine.ext.webapp import template
from base_controller import CacheableHandler
from models.event import Event
class EventWizardHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "event_wizard"
def __init__(self, *args, **kw):
super(EventWizardHandler, self).__init__(*args, **kw)
self.cache_expiration = 60 * 60
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/eventwizard.html")
selected_event_key = self.request.get('event', '')
if selected_event_key and Event.validate_key_name(selected_event_key):
selected_event = Event.get_by_id(selected_event_key)
if selected_event:
self.template_values['selected_event'] = selected_event
return template.render(path, self.template_values)
class ReactEventWizardHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "event_wizard_react"
def __init__(self, *args, **kw):
super(ReactEventWizardHandler, self).__init__(*args, **kw)
self.cache_expiration = 60 * 60
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/react-eventwizard.html")
return template.render(path, self.template_values)
| nilq/baby-python | python |
"""
MIT License
Copyright (c) 2021 martinpflaum
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#%%
import torchvision
import math
import torch
import torch.nn as nn
import numpy as np
import lightly
import pandas as pd
from data.data_utils import get_train_val_test_split,open_image,\
post_load_default,post_load_scene_depth,post_load_normal,\
post_load_lightning,load_edge_tensor,load_pickle,save_pickle
from perceptual_loss import PerceptualLoss
from torch.utils.data import Dataset
class GaussianNoise:
"""
this is from lightly https://docs.lightly.ai
Applies random Gaussian noise to a tensor.
The intensity of the noise is dependent on the mean of the pixel values.
See https://arxiv.org/pdf/2101.04909.pdf for more information.
"""
def __call__(self, sample: torch.Tensor) -> torch.Tensor:
mu = sample.mean()
snr = np.random.randint(low=4, high=8)
sigma = mu / snr
noise = torch.normal(torch.zeros(sample.shape), sigma)
return sample + noise
def depth_calc_std_mean(img_data_set_root):
train,val,test = get_train_val_test_split("./data_splits/train_test_split.csv")
out = []
for name in train:
scene_depth = post_load_scene_depth(open_image(name,"scene_depth",img_data_set_root)).reshape(-1)
out += [scene_depth]
out = torch.cat(out).reshape(-1)
return torch.std_mean(out, unbiased=False)
def get_all(file_name):
df = pd.read_csv(file_name)
df = df[["train_val_test"]]
split = np.array(df)
return split
class BrainDatasetSceneDepth(Dataset):
def __init__(self,img_data_set_root,indicies) :
super().__init__()
self.img_data_set_root = img_data_set_root
self.indicies = indicies
self.size = len(indicies)
def __len__(self):
#print("get_len")
return self.size
def __getitem__(self, index):
name = self.indicies[index]
return post_load_scene_depth(open_image(name,"scene_depth",self.img_data_set_root))
img_data_set_root="D:/ImageDatasetBig"
indicies = get_all("./data_splits/train_test_split.csv")
indicies
#%%
dset = BrainDatasetSceneDepth(img_data_set_root,indicies)
#%%
num_workers = 0
batch_size = 128
seed = 1
epochs = 50
input_size = 64
# dimension of the embeddings
num_ftrs = 512
# dimension of the output of the prediction and projection heads
out_dim = proj_hidden_dim = 512
# the prediction head uses a bottleneck architecture
pred_hidden_dim = 128
# use 2 layers in the projection head
num_mlp_layers = 2
mean,std = torch.tensor(0),torch.tensor(1)
mean,std = depth_calc_std_mean(img_data_set_root)
mean,std = mean.item(),std.item()
mean,std = (mean,mean,mean),(std,std,std)
transform = torchvision.transforms.Compose([
torchvision.transforms.Grayscale(num_output_channels=3),
torchvision.transforms.RandomResizedCrop(size=(64,64), scale=(0.2, 1.0)),
torchvision.transforms.RandomHorizontalFlip(p=0.5),
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.GaussianBlur(21),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean,std),
GaussianNoise(),
])
collate_fn = lightly.data.BaseCollateFunction(transform)
torch.manual_seed(0)
np.random.seed(0)
# set the path to the dataset
path_to_data = 'C:/Users/Martin/Downloads/test'
dataset_train_simsiam = lightly.data.LightlyDataset(
input_dir=path_to_data
)
dataloader_train_simsiam = torch.utils.data.DataLoader(
dataset_train_simsiam,
batch_size=batch_size,
shuffle=True,
collate_fn=collate_fn,
drop_last=True,
num_workers=num_workers
)
resnet = torchvision.models.resnet18()
backbone = nn.Sequential(*list(resnet.children())[:-1])
# create the SimSiam model using the backbone from above
model = lightly.models.SimSiam(
backbone,
num_ftrs=num_ftrs,
proj_hidden_dim=pred_hidden_dim,
pred_hidden_dim=pred_hidden_dim,
out_dim=out_dim,
num_mlp_layers=num_mlp_layers
)
# SimSiam uses a symmetric negative cosine similarity loss
criterion = lightly.loss.SymNegCosineSimilarityLoss()
# scale the learning rate
lr = 0.05 * batch_size / 256
# use SGD with momentum and weight decay
optimizer = torch.optim.SGD(
model.parameters(),
lr=lr,
momentum=0.9,
weight_decay=5e-4
)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
avg_loss = 0.
avg_output_std = 0.
for e in range(epochs):
for (x0, x1), _, _ in dataloader_train_simsiam:
# move images to the gpu
x0 = x0.to(device)
x1 = x1.to(device)
# run the model on both transforms of the images
# the output of the simsiam model is a y containing the predictions
# and projections for each input x
y0, y1 = model(x0, x1)
# backpropagation
loss = criterion(y0, y1)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# calculate the per-dimension standard deviation of the outputs
# we can use this later to check whether the embeddings are collapsing
output, _ = y0
output = output.detach()
output = torch.nn.functional.normalize(output, dim=1)
output_std = torch.std(output, 0)
output_std = output_std.mean()
# use moving averages to track the loss and standard deviation
w = 0.9
avg_loss = w * avg_loss + (1 - w) * loss.item()
avg_output_std = w * avg_output_std + (1 - w) * output_std.item()
# the level of collapse is large if the standard deviation of the l2
# normalized output is much smaller than 1 / sqrt(dim)
collapse_level = max(0., 1 - math.sqrt(out_dim) * avg_output_std)
# print intermediate results
print(f'[Epoch {e:3d}] '
f'Loss = {avg_loss:.2f} | '
f'Collapse Level: {collapse_level:.2f} / 1.00')
model = PerceptualLoss(model.backbone.cpu(),mean,std).cpu()
save_pickle(model,"perceptual_loss.pth")
# %%
| nilq/baby-python | python |
__author__ = "Jeremy Nelson"
import csv
import datetime
import json
import urllib2
from json_ld.utilities.creator import JSONLinkedDataCreator
class JohnPeabodyHarringtonJSONLinkedDataCreator(JSONLinkedDataCreator):
CC_URI = 'http://id.loc.gov/authorities/names/n84168445'
LOC_URI = 'http://id.loc.gov/authorities/names/no2008011986'
def __init__(self,
creator_id=None,
csv_filename=None):
"""Initializes instance of John Peabody Harrington JSON-LD creator
Parameters:
creator_id -- LOC ID of creator, defaults to Colorado College
csv_filename -- Filename of CSV file, defaults to None
"""
if creator_id is None:
creator_id = self.CC_URI
super(JohnPeabodyHarringtonJSONLinkedDataCreator, self).__init__(
**{'creator_id': creator_id})
self.title_prefix = 'John P. Harrington Papers 1907-1959 (some earlier)'
jph_csv_reader = csv.DictReader(open(csv_filename, 'rb'))
for row in jph_csv_reader:
self.records.append(row)
def __generate_topics__(self,
lcsh_subjects,
work_dict):
"""Internal function generates a list of topics from
a list of LCSH uri
Parameters:
lcsh_subjects -- list of http://id.loc.gov subject uri
work_dict -- Dictionary of properties for the Creative Work
"""
if len(lcsh_subjects) > 0:
work_dict['bf:subject'] = []
for subject_uri in lcsh_subjects:
uri = subject_uri.replace('"','').strip()
if not self.topics.has_key(uri):
loc_uri = json.load(
urllib2.urlopen('{0}.json'.format(uri)))
loc_key = u"<{0}>".format(uri)
self.topics[uri] = {
'@type': 'bf:Topic',
'prov:Generation': self.__generate_provenance__(),
'bf:label': loc_uri[loc_key].get(
u'<http://www.w3.org/2004/02/skos/core#prefLabel>',
[{'value':uri},])[0].get('value'),
'bf:identifier': uri,
'bf:hasAuthority': self.LOC_URI}
lcc_classification = loc_uri[loc_key].get(
u'<http://www.loc.gov/mads/rdf/v1#classification>',
None)
if lcc_classification is not None:
class_value = lcc_classification[0].get('value')
if not work_dict.has_key('bf:class-lcc'):
work_dict['bf:class-lcc'] = [class_value, ]
else:
work_dict['bf:class-lcc'].append(class_value)
work_dict['bf:subject'].append(self.topics[uri])
return work_dict
def generate(self):
"Linked Data Cataloging for John Peabody Harrington Collection"
for row in self.records:
work_dict = self.__generate_work__(
creative_work_class='bf:Manuscript')
instance_dict = self.__generate_instance__('online resource')
instance_dict['bf:publication'] = {
'providerName': 'National Anthropological Archives',
'identifier': 'http://id.loc.gov/authorities/names/n50065490'}
if len(row.get('Part')) > 0:
title_prefix = '{0} {1}'.format(self.title_prefix,
row.get('Part'))
else:
title_prefix = self.title_prefix
title_str = '{0} Microfilm {1}, Reel {2}'.format(
title_prefix,
row.get('Microfilm #'),
row.get('Reel #'))
title_parts = row.get('Title').replace('"','').split(",")
if len(title_parts) > 1:
sub_titles = []
for sub in title_parts:
sub = sub.strip()
sub_titles.append(sub)
title_str = '{0} "{1}'.format(title_str,
'", "'.join(sub_titles))
title_str += '"'
elif len(title_parts) == 1:
title_str = "{0} {1}".format(title_str,
title_parts[0])
work_dict['bf:title'] = {'@type': 'bf:TitleEntity',
'bf:titleValue': title_str,
'bf:label': title_str}
instance_dict['schema:contentUrl'] = '/pdf/{0}'.format(
row.get('Filename'))
work_dict['bf:hasInstance'] = [instance_dict,]
work_dict['rda:dateOfPublicationManifestation'] = row.get('Publication Date')
subjects = row.get('LCSH').split(",")
work_dict = self.__generate_topics__(subjects, work_dict)
self.works.append(work_dict)
| nilq/baby-python | python |
from .algo.algo_endpoints import AlgoEndpoints
from .graph.graph_endpoints import GraphEndpoints
from .query_runner.query_runner import QueryRunner
class IndirectEndpoints(AlgoEndpoints, GraphEndpoints):
def __init__(self, query_runner: QueryRunner, namespace: str):
super().__init__(query_runner, namespace)
| nilq/baby-python | python |
#-*- coding:utf-8 -*-
import generate_chat
import seq2seq_model
import tensorflow as tf
import numpy as np
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
if __name__ == '__main__':
_, _, source_vocab_size = generate_chat.get_vocabs(generate_chat.vocab_encode_file)
_, _, target_vocab_size = generate_chat.get_vocabs(generate_chat.vocab_decode_file)
train_set = generate_chat.read_data(generate_chat.train_encode_vec_file, generate_chat.train_decode_vec_file)
test_set = generate_chat.read_data(generate_chat.test_encode_vec_file, generate_chat.test_decode_vec_file)
train_bucket_sizes = [len(train_set[i]) for i in range(len(generate_chat._buckets))]
train_total_size = float(sum(train_bucket_sizes))
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size for i in range(len(train_bucket_sizes))]
with tf.Session() as sess:
model = seq2seq_model.Seq2SeqModel(source_vocab_size,
target_vocab_size,
generate_chat._buckets,
generate_chat.units_num,
generate_chat.num_layers,
generate_chat.max_gradient_norm,
generate_chat.batch_size,
generate_chat.learning_rate,
generate_chat.learning_rate_decay_factor,
use_lstm=True)
ckpt = tf.train.get_checkpoint_state('.')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
sess.run(tf.global_variables_initializer())
loss = 0.0
step = 0
previous_losses = []
run = True
while run:
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in range(len(train_buckets_scale)) if train_buckets_scale[i] > random_number_01])
encoder_inputs, decoder_inputs, target_weights = model.get_batch(train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False)
print("step:%d,loss:%f" % (step, step_loss))
loss += step_loss / 2000
step += 1
if step % 1000 == 0:
print("step:%d,per_loss:%f" % (step, loss))
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
model.saver.save(sess, "./../../datas/model/share/rebot/chatbot.ckpt", global_step=model.global_step)
loss = 0.0
if step % 5000 == 0:
for bucket_id in range(len(generate_chat._buckets)):
if len(test_set[bucket_id]) == 0:
continue
encoder_inputs, decoder_inputs, target_weights = model.get_batch(test_set, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id,
True)
print("bucket_id:%d,eval_loss:%f" % (bucket_id, eval_loss))
| nilq/baby-python | python |
import tkinter as tk
from src.ui.core import SortableTable
from src.library.model import PlaylistModel
class Table(SortableTable):
def __init__(self, parent, logger, library):
SortableTable.__init__(self, parent, logger)
self.library = library
self.add_column('Playlist Name', sortable=True)
self.init_treeview()
self.on_playback_event = None
self.context_view_switcher = None
def get_unsorted_item_list(self):
return self.library.session.query(PlaylistModel).all()
def create_column_values_for(self, item):
return (item.name,)
def compare_items(self, a, b):
from src.utility import compare_strings
multiplier = -1 if self.sort_in_reverse else 1
return compare_strings(a.name, b.name) * multiplier
def set_on_playback_event(self, on_playback_event):
self.on_playback_event = on_playback_event
def dispatch_playback_event(self, event):
if self.on_playback_event is not None:
self.on_playback_event(event)
def display_context_menu(self, event):
context_menu = tk.Menu(master=self.frame, tearoff=0)
context_menu.add_command(label='Set as Queue', command=self.play_playlist)
context_menu.add_command(label='Set as Queue (Shuffled)', command=self.play_playlist_shuffled)
context_menu.add_command(label='More Info', command=lambda: self.view_playlist_info(event.x_root, event.y_root))
context_menu.add_command(label='Delete', command=self.delete_playlist)
context_menu.post(event.x_root, event.y_root)
def play_playlist(self):
from src.backend.event import PlayPlaylist
self.dispatch_playback_event(PlayPlaylist(self.get_selected_item(), shuffled=False))
def play_playlist_shuffled(self):
from src.backend.event import PlayPlaylist
self.dispatch_playback_event(PlayPlaylist(self.get_selected_item(), shuffled=True))
def view_playlist_info(self, x, y):
from ...info.playlist_info import PlaylistInfo
if self.context_view_switcher is not None:
info = PlaylistInfo(self.context_view_switcher, self.logger, self.library)
info.set_item(self.get_selected_item())
self.context_view_switcher.open_page(info)
def delete_playlist(self):
item = self.get_selected_item()
self.library.session.delete(item)
self.library.session.commit()
self.refresh() | nilq/baby-python | python |
from slicegan import preprocessing, util
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import time
import matplotlib
import wandb
# 1. Start a new run
wandb.init(project='SuperRes', name='SliceGAN train', entity='tldr-group')
def train(pth, imtype, datatype, real_data, Disc, Gen, nc, l, nz, sf):
"""
train the generator
:param pth: path to save all files, imgs and data
    :param imtype: image type e.g. nphase, colour or gray
    :param datatype: training data format e.g. tif, jpg etc.
:param real_data: path to training data
:param Disc:
:param Gen:
:param nc: channels
:param l: image size
:param nz: latent vector size
:param sf: scale factor for training data
:return:
"""
if len(real_data) == 1:
real_data *= 3
isotropic = True
else:
isotropic = False
print('Loading Dataset...')
dataset_xyz = preprocessing.batch(real_data, datatype, l, sf)
## Constants for NNs
matplotlib.use('Agg')
ngpu = 1
num_epochs = 30
# batch sizes
batch_size = 32
D_batch_size = 8
# optimiser params for G and D
lrg = 0.0001
lrd = 0.0001
beta1 = 0
beta2 = 0.9
Lambda = 10
critic_iters = 5
cudnn.benchmark = True
workers = 0
lz = 4
##Dataloaders for each orientation
device = torch.device("cuda:0" if(torch.cuda.is_available() and ngpu > 0) else "cpu")
print(device, " will be used.\n")
# D trained using different data for x, y and z directions
dataloaderx = torch.utils.data.DataLoader(dataset_xyz[0], batch_size=batch_size,
shuffle=True, num_workers=workers)
dataloadery = torch.utils.data.DataLoader(dataset_xyz[1], batch_size=batch_size,
shuffle=True, num_workers=workers)
dataloaderz = torch.utils.data.DataLoader(dataset_xyz[2], batch_size=batch_size,
shuffle=True, num_workers=workers)
# Create the Genetator network
netG = Gen().to(device)
if ('cuda' in str(device)) and (ngpu > 1):
netG = nn.DataParallel(netG, list(range(ngpu)))
optG = optim.Adam(netG.parameters(), lr=lrg, betas=(beta1, beta2))
# Define 1 Discriminator and optimizer for each plane in each dimension
netDs = []
optDs = []
for i in range(3):
netD = Disc()
netD = (nn.DataParallel(netD, list(range(ngpu)))).to(device)
netDs.append(netD)
optDs.append(optim.Adam(netDs[i].parameters(), lr=lrd, betas=(beta1, beta2)))
disc_real_log = []
disc_fake_log = []
gp_log = []
Wass_log = []
print("Starting Training Loop...")
# For each epoch
start = time.time()
for epoch in range(num_epochs):
# sample data for each direction
for i, (datax, datay, dataz) in enumerate(zip(dataloaderx, dataloadery, dataloaderz), 1):
dataset = [datax, datay, dataz]
### Initialise
### Discriminator
## Generate fake image batch with G
noise = torch.randn(D_batch_size, nz, lz,lz,lz, device=device)
fake_data = netG(noise).detach()
# for each dim (d1, d2 and d3 are used as permutations to make 3D volume into a batch of 2D images)
for dim, (netD, optimizer, data, d1, d2, d3) in enumerate(
zip(netDs, optDs, dataset, [2, 3, 4], [3, 2, 2], [4, 4, 3])):
if isotropic:
netD = netDs[0]
optimizer = optDs[0]
netD.zero_grad()
##train on real images
real_data = data[0].to(device)
out_real = netD(real_data).view(-1).mean()
## train on fake images
# perform permutation + reshape to turn volume into batch of 2D images to pass to D
fake_data_perm = fake_data.permute(0, d1, 1, d2, d3).reshape(l * D_batch_size, nc, l, l)
out_fake = netD(fake_data_perm).mean()
gradient_penalty = util.calc_gradient_penalty(netD, real_data, fake_data_perm[:batch_size],
batch_size, l,
device, Lambda, nc)
disc_cost = out_fake - out_real + gradient_penalty
disc_cost.backward()
optimizer.step()
#logs for plotting
wandb.log({'out real': out_real.item()})
wandb.log({'out fake': out_fake.item()})
wandb.log({'wass': out_real.item() - out_fake.item()})
### Generator Training
if i % int(critic_iters) == 0:
netG.zero_grad()
errG = 0
noise = torch.randn(batch_size, nz, lz,lz,lz, device=device)
fake = netG(noise)
for dim, (netD, d1, d2, d3) in enumerate(
zip(netDs, [2, 3, 4], [3, 2, 2], [4, 4, 3])):
if isotropic:
#only need one D
netD = netDs[0]
# permute and reshape to feed to disc
fake_data_perm = fake.permute(0, d1, 1, d2, d3).reshape(l * batch_size, nc, l, l)
output = netD(fake_data_perm)
errG -= output.mean()
# Calculate gradients for G
errG.backward()
optG.step()
# Output training stats & show imgs
if i % 25 == 0:
netG.eval()
with torch.no_grad():
torch.save(netG.state_dict(), pth + '_Gen.pt')
wandb.save(pth + '_Gen.pt')
torch.save(netD.state_dict(), pth + '_Disc.pt')
noise = torch.randn(1, nz,lz,lz,lz, device=device)
img = netG(noise)
###Print progress
## calc ETA
steps = len(dataloaderx)
util.calc_eta(steps, time.time(), start, i, epoch, num_epochs)
###save example slices
util.test_plotter(img, 5, imtype, pth)
# plotting graphs
# util.graph_plot([disc_real_log, disc_fake_log], ['real', 'perp'], pth, 'LossGraph')
# util.graph_plot([Wass_log], ['Wass Distance'], pth, 'WassGraph')
# util.graph_plot([gp_log], ['Gradient Penalty'], pth, 'GpGraph')
netG.train()
| nilq/baby-python | python |
from typing import List
from ..error import GraphQLError
from ..language import DocumentNode
from ..type import GraphQLSchema
__all__ = ["find_deprecated_usages"]
def find_deprecated_usages(
schema: GraphQLSchema, ast: DocumentNode
) -> List[GraphQLError]: # pragma: no cover
"""Get a list of GraphQLError instances describing each deprecated use.
.. deprecated:: 3.1.3
Please use ``validate`` with ``NoDeprecatedCustomRule`` instead::
from graphql import validate, NoDeprecatedCustomRule
errors = validate(schema, document, [NoDeprecatedCustomRule])
"""
from ..validation import validate, NoDeprecatedCustomRule
return validate(schema, ast, [NoDeprecatedCustomRule])
| nilq/baby-python | python |
from .target_generators import HeatmapGenerator
from .target_generators import ScaleAwareHeatmapGenerator
from .target_generators import JointsGenerator
__all__ = ['HeatmapGenerator', 'ScaleAwareHeatmapGenerator', 'JointsGenerator']
| nilq/baby-python | python |
import re
from typing import Annotated, Any, Optional
import pytest
from arti import (
Annotation,
Artifact,
Fingerprint,
PartitionDependencies,
Producer,
StoragePartitions,
)
from arti import producer as producer_decorator # Avoid shadowing
from arti.internal.models import Model
from arti.internal.utils import frozendict
from arti.producers import ValidateSig
from arti.types import Collection, Int64, Struct
from arti.versions import String as StringVersion
from arti.views import python as python_views
from tests.arti.dummies import A1, A2, A3, A4, P1, P2, DummyStorage
Int64Artifact = Artifact.from_type(Int64())
class DummyProducer(Producer):
a1: A1
@staticmethod
def build(a1: dict) -> tuple[Annotated[dict, A2], Annotated[dict, A3]]: # type: ignore
pass
@staticmethod
def map(a1: StoragePartitions) -> PartitionDependencies:
pass
def check_model_matches(a: Model, b: Model, *, exclude: set[str]) -> None:
assert a.dict(exclude=exclude) == b.dict(exclude=exclude)
def test_Producer() -> None:
a1 = A1()
producer = DummyProducer(a1=a1)
assert producer.a1 == a1
assert len(list(producer)) == 2
expected_output_classes = [A2, A3]
for i, output in enumerate(producer):
assert isinstance(output, expected_output_classes[i])
def test_producer_decorator() -> None:
@producer_decorator()
def dummy_producer(a1: Annotated[dict, A1]) -> Annotated[dict, A2]: # type: ignore
return {}
assert dummy_producer.__name__ == "dummy_producer"
assert dummy_producer._input_artifact_types_ == frozendict(a1=A1)
assert len(dummy_producer._output_metadata_) == 1
assert dummy_producer._output_metadata_[0][0] == A2
assert dummy_producer(a1=A1()).annotations == Producer.__fields__["annotations"].default
assert dummy_producer(a1=A1()).version == Producer.__fields__["version"].default
class MyAnnotation(Annotation):
pass
def mapper() -> PartitionDependencies:
return PartitionDependencies()
@producer_decorator(
annotations=(MyAnnotation(),), map=mapper, name="test", version=StringVersion(value="test")
)
def dummy_producer2(a1: Annotated[dict, A1]) -> Annotated[dict, A2]: # type: ignore
return {}
assert dummy_producer2.__name__ == "test"
assert dummy_producer2.map == mapper
assert dummy_producer2(a1=A1()).annotations == (MyAnnotation(),)
assert dummy_producer2(a1=A1()).version == StringVersion(value="test")
def test_producer_input_metadata() -> None:
@producer_decorator()
def dummy_producer(
a1: Annotated[dict, A1], *, a: int, b: Annotated[int, "non-Artifact"] # type: ignore
) -> Annotated[dict, A2]: # type: ignore
return {}
assert dummy_producer._input_artifact_types_ == frozendict(
a1=A1, a=Int64Artifact, b=Int64Artifact
)
def test_Producer_partitioned_input_validation() -> None:
class A(Artifact):
type = Collection(element=Struct(fields={"x": Int64()}), partition_by=("x",))
class P(Producer):
a: A
@staticmethod
def build(a: list[dict]) -> Annotated[dict, A2]: # type: ignore
pass
assert P._input_artifact_types_ == frozendict(a=A)
assert P._build_input_views_ == frozendict(a=python_views.List)
with pytest.raises(ValueError, match="dict.* cannot be used to represent Collection"):
class SingularInput(Producer):
a: A
@staticmethod
def build(a: dict) -> Annotated[dict, A2]: # type: ignore
pass
with pytest.raises(
ValueError, match=re.escape("list[int] cannot be used to represent Collection")
):
class IncompatibleInput(Producer):
a: A
@staticmethod
def build(a: list[int]) -> Annotated[dict, A]: # type: ignore
pass
def test_Producer_output_metadata() -> None:
assert DummyProducer._output_metadata_ == ((A2, python_views.Dict), (A3, python_views.Dict))
class ImplicitArtifact(Producer):
a1: A1
@classmethod
def build(cls, a1: dict) -> tuple[int, Annotated[dict, A2]]: # type: ignore
pass
assert ImplicitArtifact._output_metadata_ == (
(Artifact.from_type(Int64()), python_views.Int),
(A2, python_views.Dict),
)
class ExplicitView(Producer):
a1: A1
@staticmethod
def build(a1: dict) -> Annotated[dict, A2, python_views.Dict]: # type: ignore
pass
assert ExplicitView._output_metadata_ == ((A2, python_views.Dict),)
with pytest.raises(
ValueError, match=re.escape("DupView.build 1st return (A2) - multiple Views set")
):
class DupView(Producer):
a1: A1
@staticmethod
def build(a1: dict) -> Annotated[dict, A2, python_views.Dict, python_views.Int]: # type: ignore
pass
with pytest.raises(ValueError, match="DupArtifact.build 1st return - multiple Artifacts set"):
class DupArtifact(Producer):
a1: A1
@staticmethod
def build(a1: dict) -> Annotated[dict, A1, A2]: # type: ignore
pass
def test_Producer_string_annotation() -> None:
# This may be from `x: "Type"` or `from __future__ import annotations`.
class StrAnnotation(Producer):
a1: "A1"
@staticmethod
def build(a1: "dict") -> "Annotated[dict, A2]": # type: ignore
pass
assert isinstance(StrAnnotation(a1=A1()).out(), A2)
def test_Producer_fingerprint() -> None:
p1 = P1(a1=A1())
assert p1.fingerprint == Fingerprint.from_string(
f'P1:{{"a1": {p1.a1.fingerprint.key}, "version": {p1.version.fingerprint.key}}}'
)
def test_Producer_compute_input_fingerprint() -> None:
p1 = P1(a1=A1(storage=DummyStorage(key="test")))
assert p1.compute_input_fingerprint(
frozendict(a1=StoragePartitions())
) == Fingerprint.from_string(p1._class_key_).combine(p1.version.fingerprint)
storage_partition = p1.a1.storage.generate_partition().copy(
update={"content_fingerprint": Fingerprint.from_int(10)}
)
assert p1.compute_input_fingerprint(
frozendict(a1=StoragePartitions([storage_partition]))
) == Fingerprint.from_string(p1._class_key_).combine(
p1.version.fingerprint, storage_partition.content_fingerprint
)
with pytest.raises(
ValueError, match=re.escape("Mismatched dependency inputs; expected {'a1'}, got {'junk'}")
):
p1.compute_input_fingerprint(frozendict(junk=StoragePartitions()))
def test_Producer_out() -> None:
a1, a2, a3, a4 = A1(), A2(), A3(), A4()
# single return Producer
p1 = P1(a1=a1)
a2_ = p1.out(a2)
# multi return Producer
p2 = P2(a2=a2)
a3_, a4_ = p2.out(a3, a4)
for (producer, inp, out, type_, position) in (
(p1, a2, a2_, A2, 0),
(p2, a3, a3_, A3, 0),
(p2, a4, a4_, A4, 1),
):
assert inp is not out
assert isinstance(out, type_)
assert out.producer_output is not None
assert out.producer_output.producer == producer
assert out.producer_output.position == position
check_model_matches(inp, out, exclude={"producer_output"})
assert list(p1) == [a2_]
assert list(p2) == [a3_, a4_]
def test_Producer_map_artifacts() -> None:
class P(Producer):
a1: A1
@staticmethod
def build(a1: dict) -> Annotated[dict, A2]: # type: ignore
pass
@staticmethod
def map(a1: StoragePartitions) -> PartitionDependencies:
pass
assert P._map_input_metadata_ == frozendict(a1=A1)
with pytest.raises(
ValueError,
match="BadMapParam.map a1 param - type hint must be `StoragePartitions`",
):
class BadMapParam(P):
@staticmethod
def map(a1: list) -> PartitionDependencies: # type: ignore
pass
def test_Producer_validate_output() -> None:
positive, negative = (True, "Positive"), (False, "Negative")
def is_positive(i: int) -> tuple[bool, str]:
return positive if i >= 0 else negative
@producer_decorator(validate_outputs=is_positive)
def p(x: int) -> int:
return x
assert p.validate_outputs(p.build(1)) == positive
assert p.validate_outputs(p.build(-1)) == negative
def test_Producer_validate_output_hint_validation() -> None:
def validate_any(i: Any) -> tuple[bool, str]:
return bool(i), ""
def validate_vargs_any(*vals: Any) -> tuple[bool, str]:
return bool(vals), ""
def validate_int(i: int) -> tuple[bool, str]:
return bool(i), ""
for validate_outputs in list[ValidateSig](
[
lambda x: (True, ""),
validate_any,
validate_vargs_any,
validate_int,
]
):
@producer_decorator(validate_outputs=validate_outputs)
def single_return_build(x: int) -> int:
return x
assert single_return_build.validate_outputs(5)
with pytest.raises(ValueError, match="i param - type hint must be `Any` or "):
def accepts_vargs_float(*i: float) -> tuple[bool, str]:
return bool(i), ""
@producer_decorator(validate_outputs=accepts_vargs_float)
def bad_vargs(x: int) -> int:
return x
with pytest.raises(ValueError, match="validate_output - must match the `.build` return"):
@producer_decorator(validate_outputs=validate_int)
def too_few_arg(x: int) -> tuple[int, int]:
return x, x + 1
with pytest.raises(ValueError, match="validate_output i param - must not have a default."):
@producer_decorator(validate_outputs=lambda i=5: (True, ""))
def bad_default(x: int) -> int:
return x
with pytest.raises(
ValueError, match="validate_output i param - must be usable as a positional argument."
):
def validate_kwarg(*, i: int) -> tuple[bool, str]:
return bool(i), ""
@producer_decorator(validate_outputs=validate_kwarg)
def kwarg_only(x: int) -> int:
return x
with pytest.raises(
ValueError, match="validate_output i param - type hint must match the 1st `.build` return"
):
def accepts_float(i: float) -> tuple[bool, str]:
return bool(i), ""
@producer_decorator(validate_outputs=accepts_float)
def mismatched_hint(x: int) -> int:
return x
def test_Producer_build_outputs_check() -> None:
class A(Artifact):
type = Int64()
class B(Artifact):
type = Int64()
class C(Artifact):
type = Collection(element=Struct(fields={"a": Int64()}), partition_by=("a",))
class D(Artifact):
type = Collection(element=Struct(fields={"a": Int64(), "b": Int64()}), partition_by=("b",))
class NoPartitioning(Producer):
@staticmethod
def build() -> tuple[Annotated[int, A], Annotated[int, B]]:
pass
class MatchingPartitioning(Producer):
@staticmethod
def build() -> tuple[Annotated[list[dict], C], Annotated[list[dict], C]]: # type: ignore
pass
@staticmethod
def map() -> PartitionDependencies:
return PartitionDependencies()
for first_output in [Annotated[int, A], Annotated[list[dict], C]]: # type: ignore
with pytest.raises(
ValueError, match="all output Artifacts must have the same partitioning scheme"
):
class MixedPartitioning(Producer):
@staticmethod
def build() -> tuple[first_output, Annotated[list[dict], D]]: # type: ignore
pass
with pytest.raises(
ValueError,
match=r"BadProducer.map - must be implemented when the `build` outputs are partitioned",
):
class BadProducer(Producer): # noqa: F811
@staticmethod
def build() -> Annotated[list[dict], C]: # type: ignore
pass
def test_Producer_bad_signature() -> None: # noqa: C901
# pylint: disable=function-redefined
# Ensure no error if _abstract_
class OkProducer(Producer):
_abstract_ = True
with pytest.raises(ValueError, match="BadProducer.build - must be implemented"):
class BadProducer(Producer):
pass
with pytest.raises(
ValueError,
match=r"BadProducer.build - the following parameter\(s\) must be defined as a field: {'a1'}",
):
class BadProducer(Producer): # type: ignore # noqa: F811
@classmethod
def build(cls, a1: dict) -> Annotated[dict, A2]: # type: ignore
pass
with pytest.raises(
ValueError,
match=r"BadProducer.map - the following parameter\(s\) must be defined as a field: {'a1'}",
):
class BadProducer(Producer): # type: ignore # noqa: F811
@classmethod
def build(cls) -> Annotated[dict, A2]: # type: ignore
pass
@classmethod
def map(cls, a1: StoragePartitions) -> PartitionDependencies:
pass
with pytest.raises(
ValueError,
match=r"BadProducer - the following fields aren't used in `.build` or `.map`: {'a2'}",
):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
a2: A2
@classmethod
def build(cls, a1: dict) -> Annotated[dict, A3]: # type: ignore
pass
with pytest.raises(ValueError, match="must have a type hint"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, a1): # type: ignore
pass
with pytest.raises(ValueError, match="type hint must be an Artifact subclass"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: str
@classmethod
def build(cls, a1: str) -> tuple[A2, A3]:
pass
with pytest.raises(ValueError, match="must not have a default"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, a1: dict = A1()): # type: ignore
pass
with pytest.raises(ValueError, match="must be usable as a keyword argument"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, a1: dict, /): # type: ignore
pass
with pytest.raises(ValueError, match="must be usable as a keyword argument"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, *a1: dict): # type: ignore
pass
with pytest.raises(ValueError, match="must be usable as a keyword argument"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, **a1: dict): # type: ignore
pass
with pytest.raises(ValueError, match="a return value must be set"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, a1: dict): # type: ignore
pass
with pytest.raises(ValueError, match="missing return signature"):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1
@classmethod
def build(cls, a1: dict) -> None: # type: ignore
pass
with pytest.raises(
ValueError, match="BadProducer.a1 - field must not have a default nor be Optional."
):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1 = None # type: ignore
@classmethod
def build(cls, a1: dict): # type: ignore
pass
with pytest.raises(
ValueError, match="BadProducer.a1 - field must not have a default nor be Optional."
):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: Optional[A1]
@classmethod
def build(cls, a1: dict): # type: ignore
pass
with pytest.raises(
ValueError,
match=r"BadProducer.a1 - field must not have a default nor be Optional.",
):
class BadProducer(Producer): # type: ignore # noqa: F811
a1: A1 = A1()
@classmethod
def build(cls, a1: dict) -> A2: # type: ignore
pass
with pytest.raises(ValueError, match=r"str.* cannot be used to represent Struct"):
class BadProducer(Producer): # type: ignore # noqa: F811
@classmethod
def build(cls) -> Annotated[str, A2]:
pass
with pytest.raises(
ValueError,
match=r"BadProducer.build - must be a @classmethod or @staticmethod",
):
class BadProducer(Producer): # type: ignore # noqa: F811
def build(cls) -> Annotated[dict, A2]: # type: ignore
pass
with pytest.raises(
ValueError,
match=r"BadProducer.map - must be a @classmethod or @staticmethod",
):
class BadProducer(Producer): # type: ignore # noqa: F811
@classmethod
def build(cls) -> Annotated[dict, A2]: # type: ignore
pass
def map(cls) -> PartitionDependencies:
pass
def test_Producer_bad_init() -> None:
with pytest.raises(ValueError, match="cannot be instantiated directly"):
Producer()
with pytest.raises(ValueError, match="extra fields not permitted"):
DummyProducer(junk=5)
with pytest.raises(ValueError, match="field required"):
DummyProducer()
with pytest.raises(ValueError, match="expected an instance of"):
DummyProducer(a1=5)
with pytest.raises(ValueError, match="expected an instance of"):
DummyProducer(a1=A2())
def test_Producer_bad_out() -> None:
producer = DummyProducer(a1=A1())
with pytest.raises(ValueError, match="expected 2 arguments of"):
producer.out(1) # type: ignore
with pytest.raises(
ValueError, match=r"DummyProducer.out\(\) 1st argument - expected instance of"
):
producer.out(1, 2) # type: ignore
with pytest.raises(
ValueError, match=r"DummyProducer.out\(\) 2nd argument - expected instance of"
):
producer.out(A2(), A2())
output = producer.out(A2(), A3())
with pytest.raises(ValueError, match="is produced by"):
producer.out(*output)
| nilq/baby-python | python |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.feeding_device_codes import (
FeedingDeviceCodes as FeedingDeviceCodes_,
)
from oops_fhir.r4.code_system.snomed_ct import SNOMEDCT
__all__ = ["FeedingDeviceCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class FeedingDeviceCodes(ValueSet):
"""
Feeding Device Codes
Materials used or needed to feed the patient.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/feeding-device
"""
# TODO: fix this template issue1
pass
class Meta:
resource = _resource
| nilq/baby-python | python |
import time
import os
import numpy as np
from perform.constants import REAL_TYPE
class RomSpaceMapping:
"""Base class for mapping to/from the state/latent space."""
def __init__(self, sol_domain, rom_domain, rom_model):
rom_dict = rom_domain.rom_dict
model_idx = rom_model.model_idx
self.latent_dim = rom_model.latent_dim
self.sol_shape = rom_model.sol_shape
# all mappings require scaling by default, specific methods may include additional scalings
model_dir = rom_dict["model_dir"]
self.cent_prof = self.load_feature_scaling(
os.path.join(model_dir, rom_dict["cent_profs"][model_idx]), default="zeros"
)
self.norm_fac_prof = self.load_feature_scaling(
os.path.join(model_dir, rom_dict["norm_fac_profs"][model_idx]), default="ones"
)
self.norm_sub_prof = self.load_feature_scaling(
os.path.join(model_dir, rom_dict["norm_sub_profs"][model_idx]), default="zeros"
)
if callable(getattr(rom_domain.rom_method, "load_extra_scalings", None)):
rom_domain.rom_method.load_extra_scalings(model_idx, sol_domain, rom_domain)
# specific mapping loading functions implemented by child classes
self.load_mapping()
# TODO: initialize decoder Jacobian memory once
def load_feature_scaling(self, scaling_input, default="zeros"):
"""Load a normalization or centering profile from NumPy binary.
Args:
scaling_input: String path to scaling profile NumPy binary.
default: String indicating default profile if loading fails due to size mismatch or load failure.
Returns:
scaling_prof: NumPy array of scaling profile loaded (or default, if load failed).
"""
try:
# Load single complete standardization profile from file
scaling_prof = np.load(scaling_input)
assert scaling_prof.shape == self.sol_shape
return scaling_prof
        except (AssertionError, FileNotFoundError):
            print("Standardization profile at " + scaling_input + " could not be loaded or did not match the solution shape")
if default == "zeros":
print("WARNING: standardization load failed or not specified, defaulting to zeros")
time.sleep(1.0)
scaling_prof = np.zeros(self.sol_shape, dtype=REAL_TYPE)
elif default == "ones":
print("WARNING: standardization load failed or not specified, defaulting to ones")
time.sleep(1.0)
            scaling_prof = np.ones(self.sol_shape, dtype=REAL_TYPE)
else:
raise ValueError("Invalid default: " + str(default))
return scaling_prof
def scale_profile(
self, arr_in, normalize=True, norm_fac_prof=None, norm_sub_prof=None, center=True, cent_prof=None, inverse=False
):
"""(De-)centers and/or (de-)normalizes solution profile.
Depending on argument flags, centers and/or normalizes solution profile, or de-normalizes
and/or de-centers solution profile.
If inverse is False:
arr = (arr_in - cent_prof - norm_sub_prof) / norm_fac_prof
If inverse is True:
arr = arr_in * norm_fac_prof + norm_sub_prof + cent_prof
Args:
arr_in: NumPy array of solution profile to be scaled.
normalize: Boolean flag indicating whether arr_in should be (de-)normalized.
norm_fac_prof: NumPy array of divisive normalization profile.
norm_sub_prof: NumPy array of subtractive normalization profile.
center: Boolean flag indicating whether arr_in should be (de-)centered.
cent_prof: NumPy array of centering profile.
inverse: If True, de-normalize and de-center. If False, center and normalize.
Returns:
(De)-centered and/or (de)-normalized copy of arr_in.
"""
arr = arr_in.copy()
assert normalize or center, "Must either (de-)center or (de-)normalize."
if normalize:
assert norm_fac_prof is not None, "Must provide normalization division factor to normalize"
assert norm_sub_prof is not None, "Must provide normalization subtractive factor to normalize"
if center:
assert cent_prof is not None, "Must provide centering profile to center"
# de-normalize and de-center
if inverse:
if normalize:
arr = self.normalize(arr, norm_fac_prof, norm_sub_prof, denormalize=True)
if center:
arr = self.center(arr, cent_prof, decenter=True)
# center and normalize
else:
if center:
arr = self.center(arr, cent_prof, decenter=False)
if normalize:
arr = self.normalize(arr, norm_fac_prof, norm_sub_prof, denormalize=False)
return arr
def center(self, arr_in, cent_prof, decenter=False):
"""(De)center input vector according to provided centering profile.
Args:
arr_in: NumPy array to be (de-)centered.
cent_prof: NumPy array of centering profile.
decenter: If True, decenter profile. If False, center profile.
Returns:
(De-)centered copy of arr_in.
"""
if decenter:
arr = arr_in + cent_prof
else:
arr = arr_in - cent_prof
return arr
def normalize(self, arr_in, norm_fac_prof, norm_sub_prof, denormalize=False):
"""(De)normalize input vector according to subtractive and divisive normalization profiles.
Args:
arr_in: NumPy array to be (de-)normalized.
norm_fac_prof: NumPy array of divisive normalization profile.
norm_sub_prof: NumPy array of subtractive normalization profile.
denormalize: If True, denormalize profile. If False, normalize profile.
Returns:
(De-)normalized copy of arr_in.
"""
if denormalize:
arr = arr_in * norm_fac_prof + norm_sub_prof
else:
arr = (arr_in - norm_sub_prof) / norm_fac_prof
return arr
def encode_decode_series(self, sol_series_in):
"""Compute encoding and decoding of a list of solution arrays"""
if isinstance(sol_series_in, np.ndarray):
sol_series_in = [sol_series_in]
code_series_out = []
sol_series_out = []
for sol in sol_series_in:
code_series_out.append(self.encode_sol(sol))
sol_series_out.append(self.decode_sol(code_series_out[-1]))
return code_series_out, sol_series_out
def encode_sol(self, sol_in):
sol = self.scale_profile(
sol_in,
normalize=True,
norm_fac_prof=self.norm_fac_prof,
norm_sub_prof=self.norm_sub_prof,
center=True,
cent_prof=self.cent_prof,
inverse=False,
)
code = self.apply_encoder(sol)
return code
def decode_sol(self, code_in):
"""Compute full decoding of solution, including de-centering and de-normalization.
Maps low-dimensional code to full-dimensional state, and de-centers and de-normalizes.
Note that the apply_decoder is implemented within child classes, as these are specific to a given mapping.
Args:
code_in: low-dimensional code to be decoded.
Returns:
Full-dimensional solution NumPy array resulting from decoding and de-scaling.
"""
sol = self.apply_decoder(code_in)
sol = self.scale_profile(
sol,
normalize=True,
norm_fac_prof=self.norm_fac_prof,
norm_sub_prof=self.norm_sub_prof,
center=True,
cent_prof=self.cent_prof,
inverse=True,
)
return sol
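# --- Illustrative sketch (not part of the original module) ------------------
# A minimal NumPy round trip showing the scaling convention documented in
# RomSpaceMapping.scale_profile. The profile values below are made up purely
# for demonstration; a real mapping loads them from the model directory.
def _scaling_round_trip_example():
    sol = np.arange(12.0).reshape((3, 4))  # stand-in for a (num_vars, num_cells) solution
    cent = np.full((3, 4), 0.5)  # hypothetical centering profile
    norm_sub = np.zeros((3, 4))  # hypothetical subtractive normalization profile
    norm_fac = np.full((3, 4), 2.0)  # hypothetical divisive normalization profile
    scaled = (sol - cent - norm_sub) / norm_fac  # forward: center, then normalize
    recovered = scaled * norm_fac + norm_sub + cent  # inverse: de-normalize, then de-center
    assert np.allclose(recovered, sol)
    return scaled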
| nilq/baby-python | python |
from dl.nn.Module import Module
import dl.graph.op as OP
from dl.graph import variable
class DropoutLayer(Module):
"""
Dropout layer object.
"""
def __init__(self, rate: float):
"""
Dropout layer object.
Parameters
----------
rate:
Dropout rate.
"""
super().__init__()
self.op = OP.Dropout(rate)
def forward(self, x) -> variable.Variable:
"""
Process the dropout operation.
See details at dl.graph.op.Dropout
Parameters
----------
x:
Input
Returns
-------
out:
output
"""
return self.op(x)
def eval(self):
"""
        Set the layer to evaluation mode. In this mode, dropout will not be performed.
Returns
-------
out:
None
"""
self.op.eval = True
def train(self):
"""
        Set the layer to training mode. In this mode, dropout will be performed.
Returns
-------
out:
None
"""
self.op.eval = False
| nilq/baby-python | python |
import torch.distributed as dist
from .trainer import Trainer
from ..util import DDP
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
class DistTrainer(Trainer):
"""
    Distributed trainer for multi-gpu training. (not finished yet)
"""
def run_step(self, model, batch, mode='train'):
output, loss, loss_stats = model.module.forward_train(batch)
loss = loss.mean()
if mode == 'train':
self.optimizer.zero_grad()
loss.backward()
average_gradients(model)
self.optimizer.step()
return output, loss, loss_stats
def set_device(self, batch_per_gpu, rank, device):
"""
Set model device for Distributed-Data-Parallel
:param batch_per_gpu: batch size of each gpu
:param rank: distributed training process rank
:param device: cuda
"""
self.rank = rank
self.model = DDP(batch_per_gpu, module=self.model.cuda(), device_ids=[rank], output_device=rank)
| nilq/baby-python | python |
from .answer import Answer, CalculatedAnswer, DragText, NumericalAnswer
from .enums import *
from .questions import (QCalculated, QCalculatedMultichoice, QCalculatedSimple,
QCloze, QDescription, QDragAndDropImage,
QDragAndDropMarker, QDragAndDropText, QEssay,
QMatching, QMissingWord, QMultichoice, QNumerical,
QRandomMatching, QShortAnswer, QTrueFalse)
__author__ = "Lucas Wolfgang"
__version__ = "0.0.1"
__all__ = ["GUI", "main", "Answer", "DragText", "NumericalAnswer", "CalculatedAnswer",
"QDescription", "QCalculated", "QCalculatedSimple",
"QCalculatedMultichoice", "QCloze", "QDragAndDropText",
"QDragAndDropImage", "QDragAndDropMarker", "QEssay",
"QMatching", "QRandomMatching", "QMissingWord", "QMultichoice",
"QNumerical", "QShortAnswer", "QTrueFalse"]
| nilq/baby-python | python |
import warnings
from collections import OrderedDict
import pandas as pd
from . import dtypes, utils
from .alignment import align
from .variable import IndexVariable, Variable, as_variable
from .variable import concat as concat_vars
def concat(
objs,
dim=None,
data_vars="all",
coords="different",
compat="equals",
positions=None,
indexers=None,
mode=None,
concat_over=None,
fill_value=dtypes.NA,
join="outer",
):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
fill_value : scalar, optional
Value to use for newly missing values
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
String indicating how to combine differing indexes
(excluding dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
if dim is None:
warnings.warn(
"the `dim` argument to `concat` will be required "
"in a future version of xarray; for now, setting it to "
"the old default of 'concat_dim'",
FutureWarning,
stacklevel=2,
)
dim = "concat_dims"
if indexers is not None: # pragma: no cover
warnings.warn(
"indexers has been renamed to positions; the alias "
"will be removed in a future version of xarray",
FutureWarning,
stacklevel=2,
)
positions = indexers
if mode is not None:
raise ValueError(
"`mode` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if concat_over is not None:
raise ValueError(
"`concat_over` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError(
"can only concatenate xarray Dataset and DataArray "
"objects, got %s" % type(first_obj)
)
return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
from .dataarray import DataArray
if isinstance(dim, str):
coord = None
elif not isinstance(dim, (DataArray, Variable)):
dim_name = getattr(dim, "name", None)
if dim_name is None:
dim_name = "concat_dim"
coord = IndexVariable(dim_name, dim)
dim = dim_name
elif not isinstance(dim, DataArray):
coord = as_variable(dim).to_index_variable()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
# Return values
concat_over = set()
equals = {}
if dim in datasets[0]:
concat_over.add(dim)
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)
def process_subset_opt(opt, subset):
if isinstance(opt, str):
if opt == "different":
# all nonindexes that are not the same in each dataset
for k in getattr(datasets[0], subset):
if k not in concat_over:
# Compare the variable of all datasets vs. the one
# of the first dataset. Perform the minimum amount of
# loads in order to avoid multiple loads from disk
# while keeping the RAM footprint low.
v_lhs = datasets[0].variables[k].load()
# We'll need to know later on if variables are equal.
computed = []
for ds_rhs in datasets[1:]:
v_rhs = ds_rhs.variables[k].compute()
computed.append(v_rhs)
if not v_lhs.equals(v_rhs):
concat_over.add(k)
equals[k] = False
# computed variables are not to be re-computed
# again in the future
for ds, v in zip(datasets[1:], computed):
ds.variables[k].data = v.data
break
else:
equals[k] = True
elif opt == "all":
concat_over.update(
set(getattr(datasets[0], subset)) - set(datasets[0].dims)
)
elif opt == "minimal":
pass
else:
raise ValueError("unexpected value for %s: %s" % (subset, opt))
else:
invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)]
if invalid_vars:
if subset == "coords":
raise ValueError(
"some variables in coords are not coordinates on "
"the first dataset: %s" % (invalid_vars,)
)
else:
raise ValueError(
"some variables in data_vars are not data variables "
"on the first dataset: %s" % (invalid_vars,)
)
concat_over.update(opt)
process_subset_opt(data_vars, "data_vars")
process_subset_opt(coords, "coords")
return concat_over, equals
def _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ["equals", "identical"]:
raise ValueError(
"compat=%r invalid: must be 'equals' " "or 'identical'" % compat
)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(
*datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value
)
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if compat == "identical" and not utils.dict_equiv(ds.attrs, result_attrs):
raise ValueError("dataset global attributes not equal")
for k, v in ds.variables.items():
if k not in result_vars and k not in concat_over:
raise ValueError("encountered unexpected variable %r" % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError(
"%r is a coordinate in some datasets but not " "others" % k
)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == "identical" and not utils.dict_equiv(
v.attrs, result_vars[k].attrs
):
raise ValueError("variable %s not identical across datasets" % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError("variable %s not equal across datasets" % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(
non_concat_dims.get(d, dim_len) for d in common_dims
)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
# add concat dimension last to ensure that its in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(
arrays,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
arrays = list(arrays)
if data_vars != "all":
raise ValueError(
"data_vars is not a valid argument when " "concatenating DataArray objects"
)
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == "identical":
raise ValueError("array names not identical")
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=fill_value,
join=join,
)
return arrays[0]._from_temp_dataset(ds, name)
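# --- Illustrative usage sketch (not part of the original module) ------------
# Demonstrates the most common call pattern for concat(): stacking two
# datasets along a new dimension. It goes through the public xarray package
# rather than this internal module, and the variable names are made up for
# the example.
def _concat_usage_example():
    import numpy as np
    import xarray as xr
    ds0 = xr.Dataset({"temperature": (("x",), np.array([1.0, 2.0, 3.0]))})
    ds1 = xr.Dataset({"temperature": (("x",), np.array([4.0, 5.0, 6.0]))})
    combined = xr.concat([ds0, ds1], dim="time")  # new "time" dimension of length 2
    assert combined["temperature"].shape == (2, 3)
    return combined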
| nilq/baby-python | python |
import re
import os
try:
from urlparse import urlparse
except:
from urllib.parse import urlparse
from .exceptions import FieldValidationException
from .universal_forwarder_compatiblity import UF_MODE, make_splunkhome_path
from .contrib.ipaddress import ip_network
try:
from .server_info import ServerInfo
except ImportError:
ServerInfo = None
class Field(object):
"""
This is the base class that should be used to for field validators. Sub-class this and
override to_python if you need custom validation.
"""
DATA_TYPE_STRING = 'string'
DATA_TYPE_NUMBER = 'number'
DATA_TYPE_BOOLEAN = 'boolean'
def get_data_type(self):
"""
Get the type of the field.
"""
return Field.DATA_TYPE_STRING
def __init__(self, name, title, description, none_allowed=False, empty_allowed=True,
required_on_create=None, required_on_edit=None):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human readable description of the field (e.g. "The IP or domain name
of the database server")
none_allowed -- Is a value of none allowed?
empty_allowed -- Is an empty string allowed?
required_on_create -- Is this field required when creating?
required_on_edit -- Is this field required when editing?
"""
# Try to set required_on_create and required_on_edit to sane defaults if not defined
if required_on_create is None and none_allowed:
required_on_create = False
elif required_on_create is None and not none_allowed:
required_on_create = True
if required_on_edit is None and required_on_create is not None:
required_on_edit = required_on_create
if name is None:
raise ValueError("The name parameter cannot be none")
if len(name.strip()) == 0:
raise ValueError("The name parameter cannot be empty")
if title is None:
raise ValueError("The title parameter cannot be none")
if len(title.strip()) == 0:
raise ValueError("The title parameter cannot be empty")
if description is None:
raise ValueError("The description parameter cannot be none")
if len(description.strip()) == 0:
raise ValueError("The description parameter cannot be empty")
self.name = name
self.title = title
self.description = description
self.none_allowed = none_allowed
self.empty_allowed = empty_allowed
self.required_on_create = required_on_create
self.required_on_edit = required_on_edit
def to_python(self, value, session_key=None):
"""
Convert the field to a Python object. Should throw a FieldValidationException if the data
is invalid.
Arguments:
value -- The value to convert
session_key- The session key to access Splunk (if needed)
"""
if not self.none_allowed and value is None:
raise FieldValidationException("The value for the '%s' parameter cannot be empty" % (self.name))
if not self.empty_allowed and len(str(value).strip()) == 0:
raise FieldValidationException("The value for the '%s' parameter cannot be empty" % (self.name))
return value
def to_string(self, value):
"""
Convert the field to a string value that can be returned. Should throw a
FieldValidationException if the data is invalid.
Arguments:
value -- The value to convert
"""
return str(value)
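# --- Illustrative sketch (not part of the original module) ------------------
# Minimal example of extending Field with custom validation, following the
# same pattern as the concrete validators below. The class name is made up
# for the example.
class _UppercaseExampleField(Field):
    """
    Example validator that trims and upper-cases its input.
    """
    def to_python(self, value, session_key=None):
        # Reuse the shared none/empty checks from the base class first
        Field.to_python(self, value, session_key)
        if value is None:
            return None
        return str(value).strip().upper()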
class BooleanField(Field):
"""
A validator that converts string versions of boolean to a real boolean.
"""
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value in [True, False]:
return value
elif str(value).strip().lower() in ["true", "1"]:
return True
elif str(value).strip().lower() in ["false", "0"]:
return False
raise FieldValidationException("The value of '%s' for the '%s' parameter is not a valid boolean" % (str(value), self.name))
def to_string(self, value):
if value == True:
return "1"
elif value == False:
return "0"
return str(value)
def get_data_type(self):
return Field.DATA_TYPE_BOOLEAN
class ListField(Field):
"""
    A validator that converts a comma-separated string to an array.
You can use the instance_class argument to convert individual items in the array to particular
type. That way, you can have a list of Python objects that are already converted to the values
you want. Consider this example that will include a list of parsed IP network ranges:
list_field = ListField('name', 'title', 'description', instance_class=IPNetworkField)
parsed_ip_ranges = list_field.to_python(u'10.0.0.0/28,1.2.3.4,10.0.1.0/28')
"""
def __init__(self, name, title, description, none_allowed=False, empty_allowed=True,
required_on_create=None, required_on_edit=None, instance_class=None,
trim_values=False):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human readable description of the field (e.g. "The IP or domain name
of the database server")
none_allowed -- Is a value of none allowed?
empty_allowed -- Is an empty string allowed?
required_on_create -- Is this field required when creating?
required_on_edit -- Is this field required when editing?
instance_class -- The name of the class to use for constructing individual objects
trim_values -- Trim whitespace off of the ends of the values in case that spaces between
the list are not included
"""
super(ListField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit)
self.instance_class = instance_class
self.trim_values = trim_values
# Create an instance for converting the values
if self.instance_class is not None:
self.instance = self.instance_class(self.name, self.title, self.description)
else:
self.instance = None
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
# Convert the value into an array
values_list = None
if value is not None:
values_list = value.split(",")
else:
values_list = []
# Trim the values if requested
if self.trim_values:
values_list = [value.strip() for value in values_list]
# If we have no instances class, then just return the plain list
if self.instance_class is None:
return values_list
# Otherwise, convert the instances accordingly
else:
# Convert the value
instances_list = []
for instance_value in values_list:
instances_list.append(self.instance.to_python(instance_value))
return instances_list
def to_string(self, value):
if value is not None:
# Use the instance to_string if we have an instance
if self.instance is not None:
values_list = []
for individual_value in value:
values_list.append(self.instance.to_string(individual_value))
return ",".join(values_list)
# Otherwise, process it as a string
else:
return ",".join(value)
return ""
class StaticListField(Field):
"""
This allows you to specify a list of field values that are allowed.
All other values will be rejected.
"""
_valid_values = None
def __init__(self, name, title, description, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None, valid_values=None):
super(StaticListField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit)
self.valid_values = valid_values
@property
def valid_values(self):
return self._valid_values
@valid_values.setter
def valid_values(self, values):
self._valid_values = values
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is None:
return None
elif value not in self.valid_values:
raise FieldValidationException('The value of the "' + self.name + '" field is invalid, it must be one of:' + ','.join(self.valid_values))
else:
return value
class RegexField(Field):
"""
A validator that validates input matches a regular expression.
"""
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
try:
return re.compile(value)
except Exception as exception:
raise FieldValidationException(str(exception))
else:
return None
def to_string(self, value):
if value is not None:
return value.pattern
return ""
class WildcardField(Field):
"""
Much like a regular expression field but takes wildcards. This will return a regular expression.
"""
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
try:
regex_escaped = re.escape(value)
                regex_escaped = regex_escaped.replace(r"\*", ".*")
return re.compile(regex_escaped)
except Exception as exception:
raise FieldValidationException(str(exception))
else:
return None
def to_string(self, value):
if value is not None:
return value.pattern
return ""
class IntegerField(Field):
"""
A validator that converts string input to an integer.
"""
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
try:
return int(value)
except ValueError as exception:
raise FieldValidationException(str(exception))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class FloatField(Field):
"""
A validator that converts string input to a float.
"""
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
try:
return float(value)
except ValueError as exception:
raise FieldValidationException(str(exception))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class RangeField(Field):
"""
A validator that converts string input to a pair of integers indicating a range.
"""
def __init__(self, name, title, description, low, high, none_allowed=False, empty_allowed=True, required_on_create=None, required_on_edit=None):
super(RangeField, self).__init__(name, title, description, none_allowed,
empty_allowed, required_on_create, required_on_edit)
self.low = low
self.high = high
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
try:
tmp = int(value)
if tmp < self.low:
raise FieldValidationException("The value of '%s' for the '%s' parameter must be greater than or equal to '%r'" % (str(value), self.name, self.low))
if tmp > self.high:
raise FieldValidationException("The value of '%s' for the '%s' parameter must be less than or equal to '%r'" % (str(value), self.name, self.high))
return tmp
except ValueError as exception:
raise FieldValidationException(str(exception))
else:
return None
def to_string(self, value):
if value is not None:
return str(value)
return ""
def get_data_type(self):
return Field.DATA_TYPE_NUMBER
class URLField(Field):
"""
Represents a URL. The URL is converted to a Python object that was created via urlparse.
"""
require_https_on_cloud = False
def __init__(self, name, title, description, none_allowed=False, empty_allowed=True,
required_on_create=None, required_on_edit=None, require_https_on_cloud=False):
super(URLField, self).__init__(name, title, description, none_allowed,
empty_allowed, required_on_create, required_on_edit)
self.require_https_on_cloud = require_https_on_cloud
@classmethod
def parse_url(cls, value, name):
"""
        Parse a URL and generate an exception if it is invalid.
Otherwise, return a parsed URL (via urlparse).
"""
parsed_value = urlparse(value)
if parsed_value.hostname is None or len(parsed_value.hostname) <= 0:
raise FieldValidationException("The value of '%s' for the '%s' parameter does not contain a host name" % (str(value), name))
if parsed_value.scheme not in ["http", "https"]:
raise FieldValidationException("The value of '%s' for the '%s' parameter does not contain a valid protocol (only http and https are supported)" % (str(value), name))
return parsed_value
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
parsed_value = URLField.parse_url(value.strip(), self.name)
if self.require_https_on_cloud and parsed_value.scheme == "http" and session_key is not None and ServerInfo.is_on_cloud(session_key):
raise FieldValidationException("The value of '%s' for the '%s' parameter must use encryption (be HTTPS not HTTP)" % (str(value), self.name))
return parsed_value
def to_string(self, value):
return value.geturl()
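# --- Illustrative sketch (not part of the original module) ------------------
# Shows how URLField validates and round-trips a URL. The field name, title,
# and description are placeholder values for the example.
def _url_field_example():
    url_field = URLField("remote_url", "Remote URL", "Example URL parameter")
    parsed = url_field.to_python("https://example.com:8089/services")
    assert parsed.hostname == "example.com"  # hostname and scheme were validated
    assert url_field.to_string(parsed) == "https://example.com:8089/services"
    return parsed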
class DurationField(Field):
"""
The duration field represents a duration as represented by a string such as 1d for a 24 hour
period.
The string is converted to an integer indicating the number of seconds.
"""
    DURATION_RE = re.compile(r"(?P<duration>[0-9]+)\s*(?P<units>[a-z]*)", re.IGNORECASE)
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
UNITS = {
'w' : WEEK,
'week' : WEEK,
'd' : DAY,
'day' : DAY,
'h' : HOUR,
'hour' : HOUR,
'm' : MINUTE,
'min' : MINUTE,
'minute' : MINUTE,
's' : 1
}
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
# Parse the duration
duration_match = DurationField.DURATION_RE.match(value)
# Make sure the duration could be parsed
if duration_match is None:
raise FieldValidationException("The value of '%s' for the '%s' parameter is not a valid duration" % (str(value), self.name))
# Get the units and duration
match_dict = duration_match.groupdict()
units = match_dict['units']
# Parse the value provided
try:
duration = int(match_dict['duration'])
except ValueError:
raise FieldValidationException("The duration '%s' for the '%s' parameter is not a valid number" % (match_dict['duration'], self.name))
# Make sure the units are valid
if len(units) > 0 and units not in DurationField.UNITS:
raise FieldValidationException("The unit '%s' for the '%s' parameter is not a valid unit of duration" % (units, self.name))
# Convert the units to seconds
if len(units) > 0:
return duration * DurationField.UNITS[units]
else:
return duration
def to_string(self, value):
return str(value)
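# --- Illustrative sketch (not part of the original module) ------------------
# Shows how DurationField converts duration strings into seconds. The field
# name, title, and description are placeholder values for the example.
def _duration_field_example():
    duration_field = DurationField("interval", "Interval", "Example duration parameter")
    assert duration_field.to_python("30s") == 30
    assert duration_field.to_python("5m") == 5 * 60
    assert duration_field.to_python("2d") == 2 * 24 * 60 * 60
    assert duration_field.to_python("45") == 45  # a bare number is treated as seconds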
class DeprecatedField(Field):
"""
Represents a field that is no longer used. This should be used when you want the input to pass
validation with arguments that are no longer used.
"""
def __init__(self, name, title, description, none_allowed=True, empty_allowed=True,
required_on_create=False, required_on_edit=False):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human readable description of the field (e.g. "The IP or domain name of the database server")
none_allowed -- Is a value of none allowed?
empty_allowed -- Is an empty string allowed?
required_on_create -- Is this field required when creating?
required_on_edit -- Is this field required when editing?
"""
super(DeprecatedField, self).__init__(name, title, description,
none_allowed=none_allowed,
empty_allowed=empty_allowed,
required_on_create=required_on_create,
required_on_edit=required_on_edit)
def to_python(self, value, session_key=None):
return None
def to_string(self, value):
return ""
class FilePathField(Field):
'''
Represents a path to file.
'''
def __init__(self, name, title, description, none_allowed=False, empty_allowed=True,
required_on_create=None, required_on_edit=None, validate_file_existence=True):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human readable description of the field (e.g. "The IP or domain name
of the database server")
none_allowed -- Is a value of none allowed?
empty_allowed -- Is an empty string allowed?
required_on_create -- Is this field required when creating?
required_on_edit -- Is this field required when editing?
validate_file_existence -- If true, this field will generate an error if the file doesn't exist
"""
super(FilePathField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit)
self.validate_file_existence = validate_file_existence
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
# Don't bother validating if the parameter wasn't provided
if value is None or len(value.strip()) == 0:
return value
# Resolve the file path as necessary
resolved_path = None
if value is not None:
if os.path.isabs(value) or UF_MODE:
resolved_path = value
else:
path = os.path.join(make_splunkhome_path([value]))
resolved_path = path
# Validate the file existence if requested
if self.validate_file_existence and not os.path.isfile(resolved_path):
raise FieldValidationException("The parameter '%s' is not a valid path; '%s' does not exist" % (self.name, resolved_path))
return resolved_path
def to_string(self, value):
return value
class DomainNameField(Field):
"""
A validator that accepts domain names.
"""
def is_valid_hostname(self, dn):
"""
Determine if the given hostname is valid.
See https://stackoverflow.com/questions/2532053/validate-a-hostname-string
"""
if dn.endswith('.'):
dn = dn[:-1]
if len(dn) < 1 or len(dn) > 253:
return False
ldh_re = re.compile('^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$',
re.IGNORECASE)
return all(ldh_re.match(x) for x in dn.split('.'))
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
if not self.is_valid_hostname(value):
raise FieldValidationException("The value of '%s' for the '%s' parameter is not a valid domain name" % (value, self.name))
return value
else:
return None
class MultiValidatorField(Field):
def __init__(self, name, title, description, none_allowed=False, empty_allowed=True,
required_on_create=None, required_on_edit=None, validators=None, default_message=None):
"""
Create the field.
Arguments:
name -- Set the name of the field (e.g. "database_server")
title -- Set the human readable title (e.g. "Database server")
description -- Set the human readable description of the field (e.g. "The IP or domain name
of the database server")
none_allowed -- Is a value of none allowed?
empty_allowed -- Is an empty string allowed?
required_on_create -- Is this field required when creating?
required_on_edit -- Is this field required when editing?
        validators -- The list of validator classes to test the value against
        default_message -- The error message to use when none of the validators accept the value
"""
super(MultiValidatorField, self).__init__(name, title, description, none_allowed, empty_allowed, required_on_create, required_on_edit)
# Stop if no validators were supplied
if validators is None or len(validators) == 0:
raise Exception("A list of the validators is required for the MultiValidatorField to test against")
# Here is where all of the instances of the validators will be stored
self.validators = []
# Construct the validator instances
for validator in validators:
self.validators.append(validator(self.name, self.title, self.description, self.none_allowed, self.empty_allowed, self.required_on_create, self.required_on_edit))
# This will point to the last validator instance that accepted the last value
self.last_used_validator = None
# Persist the error message
self.default_message = default_message
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
            messages = []
for validator in self.validators:
try:
python_value = validator.to_python(value, session_key)
self.last_used_validator = validator
return python_value
except FieldValidationException as e:
messages.append(str(e))
# Generate an exception since the field could not be validated
if self.default_message is None:
raise FieldValidationException(";".join(messages))
else:
raise FieldValidationException(self.default_message)
else:
return None
def to_string(self, value):
if value is not None:
return self.last_used_validator.to_string(value)
return ""
class IPNetworkField(Field):
"""
A validator that accepts IP addresses.
"""
def to_python(self, value, session_key=None):
Field.to_python(self, value, session_key)
if value is not None:
# Convert the incoming string to bytes
# For Python 2, str works fine since it is just bytes. Python 3 defaults to unicode which needs to be converted.
try:
unicode
if not isinstance(value, unicode):
value = unicode(value)
# The interpreter is Python 2
except NameError:
# The interpreter is Python 3, it is unicode already
pass
try:
return ip_network(value, strict=False)
except ValueError as exception:
raise FieldValidationException(str(exception))
else:
return None
def to_string(self, value):
if value is not None:
# Get the main address if this is a single address
if value.num_addresses == 1:
return str(value.network_address)
else:
return str(value)
return ""
| nilq/baby-python | python |
import os
import pandas as pd
import pytest
from probatus.feature_elimination import EarlyStoppingShapRFECV, ShapRFECV
from probatus.utils import preprocess_labels
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer
from sklearn.model_selection import RandomizedSearchCV, StratifiedGroupKFold, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
@pytest.fixture(scope="function")
def X():
"""
Fixture for X.
"""
return pd.DataFrame(
{
"col_1": [1, 1, 1, 1, 1, 1, 1, 0],
"col_2": [0, 0, 0, 0, 0, 0, 0, 1],
"col_3": [1, 0, 1, 0, 1, 0, 1, 0],
},
index=[1, 2, 3, 4, 5, 6, 7, 8],
)
@pytest.fixture(scope="session")
def catboost_classifier_class():
"""This fixture allows to reuse the import of the CatboostClassifier class across different tests.
It is equivalent to importing the package at the beginning of the file.
Importing catboost multiple times results in a ValueError: I/O operation on closed file.
"""
from catboost import CatBoostClassifier
return CatBoostClassifier
@pytest.fixture(scope="function")
def y():
"""
Fixture for y.
"""
return pd.Series([1, 0, 1, 0, 1, 0, 1, 0], index=[1, 2, 3, 4, 5, 6, 7, 8])
@pytest.fixture(scope="function")
def sample_weight():
"""
Fixture for sample_weight.
"""
return pd.Series([1, 1, 1, 1, 1, 1, 1, 1], index=[1, 2, 3, 4, 5, 6, 7, 8])
@pytest.fixture(scope="function")
def groups():
"""
Fixture for groups.
"""
return pd.Series(["grp1", "grp1", "grp1", "grp1", "grp2", "grp2", "grp2", "grp2"], index=[1, 2, 3, 4, 5, 6, 7, 8])
def test_shap_rfe_randomized_search(X, y, capsys):
"""
Test with RandomizedSearchCV.
"""
clf = DecisionTreeClassifier(max_depth=1)
param_grid = {"criterion": ["gini"], "min_samples_split": [1, 2]}
search = RandomizedSearchCV(clf, param_grid, cv=2, n_iter=2)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(search, step=0.8, cv=2, scoring="roc_auc", n_jobs=4, random_state=1)
report = shap_elimination.fit_compute(X, y)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
assert report.shape[0] == 2
assert shap_elimination.get_reduced_features_set(1) == ["col_3"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was at least 2 for the verbose (2 generated by probatus + possibly more by SHAP)
assert len(record) >= 2
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_shap_rfe(X, y, sample_weight, capsys):
"""
Test with ShapRFECV.
"""
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(
clf,
random_state=1,
step=1,
cv=2,
scoring="roc_auc",
n_jobs=4,
)
shap_elimination = shap_elimination.fit(
X, y, sample_weight=sample_weight, approximate=True, check_additivity=False
)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 3
assert shap_elimination.get_reduced_features_set(1) == ["col_3"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_shap_rfe_group_cv(X, y, groups, sample_weight, capsys):
"""
Test ShapRFECV with StratifiedGroupKFold.
"""
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
cv = StratifiedGroupKFold(n_splits=2, shuffle=True, random_state=1)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(
clf,
random_state=1,
step=1,
cv=cv,
scoring="roc_auc",
n_jobs=4,
)
shap_elimination = shap_elimination.fit(
X, y, groups=groups, sample_weight=sample_weight, approximate=True, check_additivity=False
)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 3
assert shap_elimination.get_reduced_features_set(1) == ["col_3"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_shap_pipeline_error(X, y, capsys):
"""
Test with ShapRFECV for pipelines.
"""
clf = Pipeline(
[
("scaler", StandardScaler()),
("dt", DecisionTreeClassifier(max_depth=1, random_state=1)),
]
)
with pytest.raises(TypeError):
shap_elimination = ShapRFECV(
clf,
random_state=1,
step=1,
cv=2,
scoring="roc_auc",
n_jobs=4,
)
shap_elimination = shap_elimination.fit(X, y, approximate=True, check_additivity=False)
def test_shap_rfe_linear_model(X, y, capsys):
"""
Test ShapRFECV with linear model.
"""
clf = LogisticRegression(C=1, random_state=1)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(clf, random_state=1, step=1, cv=2, scoring="roc_auc", n_jobs=4)
shap_elimination = shap_elimination.fit(X, y)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 3
assert shap_elimination.get_reduced_features_set(1) == ["col_3"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_shap_rfe_svm(X, y, capsys):
"""
Test with ShapRFECV with SVM.
"""
clf = SVC(C=1, kernel="linear", probability=True)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(clf, random_state=1, step=1, cv=2, scoring="roc_auc", n_jobs=4)
shap_elimination = shap_elimination.fit(X, y)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 3
assert shap_elimination.get_reduced_features_set(1) == ["col_3"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_shap_rfe_cols_to_keep(X, y, capsys):
"""
    Test for shap_rfe_cv with the features-to-keep parameter.
"""
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(
clf,
random_state=1,
step=2,
cv=2,
scoring="roc_auc",
n_jobs=4,
min_features_to_select=1,
)
shap_elimination = shap_elimination.fit(X, y, columns_to_keep=["col_2", "col_3"])
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 2
reduced_feature_set = set(shap_elimination.get_reduced_features_set(num_features=2))
assert reduced_feature_set == set(["col_2", "col_3"])
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_shap_rfe_randomized_search_cols_to_keep(X, y, capsys):
"""
Test with ShapRFECV with column to keep param.
"""
clf = DecisionTreeClassifier(max_depth=1)
param_grid = {"criterion": ["gini"], "min_samples_split": [1, 2]}
search = RandomizedSearchCV(clf, param_grid, cv=2, n_iter=2)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(search, step=0.8, cv=2, scoring="roc_auc", n_jobs=4, random_state=1)
report = shap_elimination.fit_compute(X, y, columns_to_keep=["col_2", "col_3"])
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
assert report.shape[0] == 2
reduced_feature_set = set(shap_elimination.get_reduced_features_set(num_features=2))
assert reduced_feature_set == set(["col_2", "col_3"])
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was at least 2 for the verbose (2 generated by probatus + possibly more by SHAP)
assert len(record) >= 2
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
def test_calculate_number_of_features_to_remove():
"""
Test with ShapRFECV with n features to remove.
"""
assert 3 == ShapRFECV._calculate_number_of_features_to_remove(
current_num_of_features=10, num_features_to_remove=3, min_num_features_to_keep=5
)
assert 3 == ShapRFECV._calculate_number_of_features_to_remove(
current_num_of_features=8, num_features_to_remove=5, min_num_features_to_keep=5
)
assert 0 == ShapRFECV._calculate_number_of_features_to_remove(
current_num_of_features=5, num_features_to_remove=1, min_num_features_to_keep=5
)
assert 4 == ShapRFECV._calculate_number_of_features_to_remove(
current_num_of_features=5, num_features_to_remove=7, min_num_features_to_keep=1
)
def test_get_feature_shap_values_per_fold(X, y):
"""
Test with ShapRFECV with features per fold.
"""
clf = DecisionTreeClassifier(max_depth=1)
shap_elimination = ShapRFECV(clf)
(shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold(
X,
y,
clf,
train_index=[2, 3, 4, 5, 6, 7],
val_index=[0, 1],
scorer=get_scorer("roc_auc"),
)
assert test_score == 1
assert train_score > 0.9
assert shap_values.shape == (2, 3)
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_complex_dataset(complex_data, complex_lightgbm):
"""
Test on complex dataset.
"""
X, y = complex_data
param_grid = {
"n_estimators": [5, 7, 10],
"num_leaves": [3, 5, 7, 10],
}
search = RandomizedSearchCV(complex_lightgbm, param_grid, n_iter=1)
shap_elimination = ShapRFECV(clf=search, step=1, cv=10, scoring="roc_auc", n_jobs=3, verbose=50)
with pytest.warns(None) as record:
report = shap_elimination.fit_compute(X, y)
assert report.shape[0] == X.shape[1]
assert len(record) >= 2
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_shap_rfe_early_stopping_lightGBM(complex_data, capsys):
"""
Test EarlyStoppingShapRFECV with a LGBMClassifier.
"""
from lightgbm import LGBMClassifier
clf = LGBMClassifier(n_estimators=200, max_depth=3)
X, y = complex_data
with pytest.warns(None) as record:
shap_elimination = EarlyStoppingShapRFECV(
clf,
random_state=1,
step=1,
cv=10,
scoring="roc_auc",
n_jobs=4,
early_stopping_rounds=5,
eval_metric="auc",
)
shap_elimination = shap_elimination.fit(X, y, approximate=False, check_additivity=False)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 5
assert shap_elimination.get_reduced_features_set(1) == ["f5"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_shap_rfe_early_stopping_XGBoost(complex_data, capsys):
"""
    Test EarlyStoppingShapRFECV with an XGBClassifier.
"""
from xgboost import XGBClassifier
clf = XGBClassifier(n_estimators=200, max_depth=3, use_label_encoder=False, random_state=42)
X, y = complex_data
X["f1_categorical"] = X["f1_categorical"].astype(float)
with pytest.warns(None) as record:
shap_elimination = EarlyStoppingShapRFECV(
clf,
random_state=1,
step=1,
cv=10,
scoring="roc_auc",
n_jobs=4,
early_stopping_rounds=5,
eval_metric="auc",
)
shap_elimination = shap_elimination.fit(X, y, approximate=False, check_additivity=False)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 5
assert shap_elimination.get_reduced_features_set(1) == ["f4"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
# For now this test fails, catboost has issues with categorical variables and
@pytest.mark.xfail
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_shap_rfe_early_stopping_CatBoost(complex_data, capsys, catboost_classifier_class):
"""
Test EarlyStoppingShapRFECV with a CatBoostClassifier.
"""
clf = catboost_classifier_class(random_seed=42)
X, y = complex_data
with pytest.warns(None) as record:
shap_elimination = EarlyStoppingShapRFECV(
clf,
random_state=1,
step=1,
cv=10,
scoring="roc_auc",
n_jobs=4,
early_stopping_rounds=5,
eval_metric="auc",
)
shap_elimination = shap_elimination.fit(X, y, approximate=False, check_additivity=False)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 5
assert shap_elimination.get_reduced_features_set(1)[0] in ["f4", "f5"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_shap_rfe_randomized_search_early_stopping_lightGBM(complex_data):
"""
Test EarlyStoppingShapRFECV with RandomizedSearchCV and a LGBMClassifier on complex dataset.
"""
from lightgbm import LGBMClassifier
clf = LGBMClassifier(n_estimators=200)
X, y = complex_data
param_grid = {
"max_depth": [3, 4, 5],
}
search = RandomizedSearchCV(clf, param_grid, cv=2, n_iter=2)
with pytest.warns(None) as record:
shap_elimination = EarlyStoppingShapRFECV(
search,
step=1,
cv=10,
scoring="roc_auc",
early_stopping_rounds=5,
eval_metric="auc",
n_jobs=4,
verbose=50,
random_state=1,
)
report = shap_elimination.fit_compute(X, y)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
assert report.shape[0] == X.shape[1]
assert shap_elimination.get_reduced_features_set(1) == ["f5"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was at least 3 for the verbose (2 generated by probatus + possibly more by SHAP)
assert len(record) >= 3
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_get_feature_shap_values_per_fold_early_stopping_lightGBM(complex_data):
"""
Test with ShapRFECV with features per fold.
"""
from lightgbm import LGBMClassifier
clf = LGBMClassifier(n_estimators=200, max_depth=3)
X, y = complex_data
y = preprocess_labels(y, y_name="y", index=X.index)
shap_elimination = EarlyStoppingShapRFECV(clf, early_stopping_rounds=5)
(shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold(
X,
y,
clf,
train_index=list(range(5, 50)),
val_index=[0, 1, 2, 3, 4],
scorer=get_scorer("roc_auc"),
)
assert test_score > 0.6
assert train_score > 0.6
assert shap_values.shape == (5, 5)
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_get_feature_shap_values_per_fold_early_stopping_CatBoost(complex_data, catboost_classifier_class):
"""
Test with ShapRFECV with features per fold.
"""
clf = catboost_classifier_class(random_seed=42)
X, y = complex_data
X["f1_categorical"] = X["f1_categorical"].astype(str).astype("category")
y = preprocess_labels(y, y_name="y", index=X.index)
shap_elimination = EarlyStoppingShapRFECV(clf, early_stopping_rounds=5)
(shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold(
X,
y,
clf,
train_index=list(range(5, 50)),
val_index=[0, 1, 2, 3, 4],
scorer=get_scorer("roc_auc"),
)
assert test_score > 0
assert train_score > 0.6
assert shap_values.shape == (5, 5)
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_get_feature_shap_values_per_fold_early_stopping_XGBoost(complex_data):
"""
Test with ShapRFECV with features per fold.
"""
from xgboost import XGBClassifier
clf = XGBClassifier(n_estimators=200, max_depth=3, use_label_encoder=False, random_state=42)
X, y = complex_data
X["f1_categorical"] = X["f1_categorical"].astype(float)
y = preprocess_labels(y, y_name="y", index=X.index)
shap_elimination = EarlyStoppingShapRFECV(clf, early_stopping_rounds=5)
(shap_values, train_score, test_score,) = shap_elimination._get_feature_shap_values_per_fold(
X,
y,
clf,
train_index=list(range(5, 50)),
val_index=[0, 1, 2, 3, 4],
scorer=get_scorer("roc_auc"),
)
assert test_score > 0
assert train_score > 0.6
assert shap_values.shape == (5, 5)
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_EarlyStoppingShapRFECV_no_categorical(complex_data):
"""Test EarlyStoppingShapRFECV when no categorical features are present."""
from lightgbm import LGBMClassifier
model = LGBMClassifier(n_estimators=50, max_depth=3, num_leaves=3)
shap_elimination = EarlyStoppingShapRFECV(
clf=model,
step=0.33,
cv=5,
scoring="accuracy",
eval_metric="logloss",
early_stopping_rounds=5,
)
X, y = complex_data
X = X.drop(columns=["f1_categorical"])
report = shap_elimination.fit_compute(X, y, feature_perturbation="tree_path_dependent")
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
assert report.shape[0] == X.shape[1]
assert shap_elimination.get_reduced_features_set(1) == ["f5"]
_ = shap_elimination.plot(show=False)
@pytest.mark.skipif(os.environ.get("SKIP_LIGHTGBM") == "true", reason="LightGBM tests disabled")
def test_LightGBM_stratified_kfold():
"""
Test added to check for https://github.com/ing-bank/probatus/issues/170.
"""
from lightgbm import LGBMClassifier
X = pd.DataFrame(
[
[1, 2, 3, 4, 5, 101, 102, 103, 104, 105],
[-1, -2, 2, -5, -7, 1, 2, 5, -1, 3],
["a", "b"] * 5, # noisy categorical will dropped first
]
).transpose()
X[2] = X[2].astype("category")
X[1] = X[1].astype("float")
X[0] = X[0].astype("float")
y = [0] * 5 + [1] * 5
model = LGBMClassifier()
n_iter = 2
n_folds = 3
for _ in range(n_iter):
skf = StratifiedKFold(n_folds, shuffle=True, random_state=42)
shap_elimination = EarlyStoppingShapRFECV(
clf=model,
step=1 / (n_iter + 1),
cv=skf,
scoring="accuracy",
eval_metric="logloss",
early_stopping_rounds=5,
)
report = shap_elimination.fit_compute(X, y, feature_perturbation="tree_path_dependent")
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
assert report.shape[0] == X.shape[1]
shap_elimination.plot(show=False)
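# --- Hedged editor addition: a distilled sketch of the workflow the tests above
# exercise. The synthetic frame below stands in for the `complex_data` fixture,
# which is defined elsewhere in this test suite; the estimator and keyword
# arguments mirror the calls made in the tests.
def _example_early_stopping_rfecv():
    import pandas as pd
    from lightgbm import LGBMClassifier
    from sklearn.datasets import make_classification
    X_arr, y = make_classification(n_samples=200, n_features=8, random_state=0)
    X = pd.DataFrame(X_arr, columns=[f"f{i}" for i in range(8)])
    shap_elimination = EarlyStoppingShapRFECV(
        clf=LGBMClassifier(n_estimators=200, max_depth=3),
        step=1,
        cv=5,
        scoring="roc_auc",
        eval_metric="auc",
        early_stopping_rounds=5,
    )
    report = shap_elimination.fit_compute(X, y)  # one row per elimination round
    return report, shap_elimination.get_reduced_features_set(3)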
| nilq/baby-python | python |
# -*- coding: utf8 -*-
from base import Stock
class Uzmanpara(Stock):
stockURL = "http://uzmanpara.milliyet.com.tr/borsa/hisse-senetleri/{0}/"
priceQuery = '.realTime > .price-arrow-down, .realTime > .price-arrow-up'
volumeQuery = '.realTime table tr td'
timezone = "Europe/Istanbul"
@classmethod
def extractVolume(cls, d):
return d(cls.volumeQuery)[7].text[1:].replace(".", "")
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import sys
from pkg_resources import load_entry_point
from subprocess import check_call
def main():
check_call([sys.executable, 'setup.py', 'build_ext', '--inplace'])
if '--with-coverage' not in sys.argv:
sys.argv.extend(('--with-coverage', '--cover-package=cg'))
sys.exit(
load_entry_point('nose', 'console_scripts', 'nosetests')()
)
if __name__ == '__main__':
main()
| nilq/baby-python | python |
"""Tests for ht.events.manager module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Third Party
import pytest
# Houdini Toolbox
import ht.events.manager
from ht.events.event import HoudiniEvent
from ht.events.group import HoudiniEventGroup
from ht.events.item import HoudiniEventItem
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
def init_manager(mocker):
"""Fixture to initialize a manager."""
mocker.patch.object(
ht.events.manager.HoudiniEventManager, "__init__", lambda x: None
)
def _create():
return ht.events.manager.HoudiniEventManager()
return _create
# =============================================================================
# TESTS
# =============================================================================
class Test_HoudiniEventManager:
"""Test ht.events.manager.HoudiniEventManager class."""
def test___init__(self):
"""Test object initialization."""
manager = ht.events.manager.HoudiniEventManager()
assert manager._data == {}
assert manager._events == {}
assert manager._event_states == {}
# Properties
def test_data(self, init_manager, mocker):
"""Test the 'data' property"""
mock_value = mocker.MagicMock(spec=dict)
manager = init_manager()
manager._data = mock_value
assert manager.data == mock_value
def test_events(self, init_manager, mocker):
"""Test the 'events' property"""
mock_event = mocker.MagicMock(spec=HoudiniEvent)
events = {mocker.MagicMock(spec=str): mock_event}
manager = init_manager()
manager._events = events
assert manager.events == events
# Methods
# _disable_events
def test__disable_events__all(self, init_manager, mocker):
"""Test disabling all events."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_event1 = mocker.MagicMock(spec=HoudiniEvent)
mock_enabled1 = mocker.PropertyMock(return_value=False)
type(mock_event1).enabled = mock_enabled1
mock_event2 = mocker.MagicMock(spec=HoudiniEvent)
mock_enabled2 = mocker.PropertyMock(return_value=True)
type(mock_event2).enabled = mock_enabled2
mock_events.return_value = {
mock_event1.name: mock_event1,
mock_event2.name: mock_event2,
}
manager = init_manager()
manager._event_states = {}
manager._disable_events()
        # Each event should have its enabled property accessed twice:
# once to store the current value and then to set the value to False
mock_enabled1.assert_has_calls([mocker.call(), mocker.call(False)])
mock_enabled2.assert_has_calls([mocker.call(), mocker.call(False)])
assert not manager._event_states[mock_event1.name]
assert manager._event_states[mock_event2.name]
def test__disable_events__specific_names(self, init_manager, mocker):
"""Test disabling specific events."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_event1 = mocker.MagicMock(spec=HoudiniEvent)
mock_enabled1 = mocker.PropertyMock(return_value=True)
type(mock_event1).enabled = mock_enabled1
mock_event2 = mocker.MagicMock(spec=HoudiniEvent)
mock_enabled2 = mocker.PropertyMock(return_value=True)
type(mock_event2).enabled = mock_enabled2
mock_events.return_value = {
mock_event1.name: mock_event1,
mock_event2.name: mock_event2,
}
manager = init_manager()
manager._event_states = {}
manager._disable_events(names=[mock_event2.name])
# Event 1's enabled property should not have been accessed.
mock_enabled1.assert_not_called()
# Event 2's should have been accessed to get the current value
# and once to disable it.
mock_enabled2.assert_has_calls([mocker.call(), mocker.call(False)])
assert manager._event_states[mock_event2.name]
assert len(manager._event_states) == 1
def test__restore_events(self, init_manager, mocker):
"""Test restoring disabled events."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_event1 = mocker.MagicMock(spec=HoudiniEvent)
mock_enabled1 = mocker.PropertyMock(return_value=False)
type(mock_event1).enabled = mock_enabled1
mock_event2 = mocker.MagicMock(spec=HoudiniEvent)
mock_enabled2 = mocker.PropertyMock(return_value=False)
type(mock_event2).enabled = mock_enabled2
mock_events.return_value = {
mock_event1.name: mock_event1,
mock_event2.name: mock_event2,
}
mock_states = mocker.MagicMock(spec=dict)
states = {mock_event1.name: False, mock_event2.name: True}
mock_states.items.return_value = list(states.items())
manager = init_manager()
manager._event_states = mock_states
manager._restore_events()
# Event 1's enable should have been set to False, 2's True
mock_enabled1.assert_has_calls([mocker.call(False)])
mock_enabled2.assert_has_calls([mocker.call(True)])
mock_states.clear.assert_called_once()
def test_create_event(self, init_manager, mocker):
"""Test creating an event."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_factory = mocker.patch("ht.events.manager.HoudiniEventFactory")
mock_event = mocker.MagicMock(spec=HoudiniEvent)
mock_factory.get_event_type.return_value = mock_event
events = {}
mock_events.return_value = events
manager = init_manager()
mock_name = mocker.MagicMock(spec=str)
result = manager.create_event(mock_name)
assert result == mock_event
assert mock_event in list(events.values())
mock_factory.get_event_type.assert_called_with(mock_name)
def test_event_disabler(self, init_manager, mocker):
"""Test the event_disabler context manager."""
mock_disable = mocker.patch.object(
ht.events.manager.HoudiniEventManager, "_disable_events"
)
mock_restore = mocker.patch.object(
ht.events.manager.HoudiniEventManager, "_restore_events"
)
manager = init_manager()
mock_names = mocker.MagicMock(spec=tuple)
with manager.event_disabler(names=mock_names):
pass
mock_disable.assert_called_with(mock_names)
mock_restore.assert_called_once()
# register_event_group
def test_register_event_group__invalid_type(self, init_manager, mocker):
"""Test registering an event group with an invalid object type."""
# Don't spec so it will fail isinstance(EventGroup)
mock_group = mocker.MagicMock()
manager = init_manager()
with pytest.raises(TypeError):
manager.register_event_group(mock_group)
def test_register_event_group__single_items(self, init_manager, mocker):
"""Test registering a group where no event of that name has been created."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_create = mocker.patch.object(
ht.events.manager.HoudiniEventManager, "create_event"
)
mock_item1 = mocker.MagicMock(spec=HoudiniEventItem)
mock_item2 = mocker.MagicMock(spec=HoudiniEventItem)
mock_event_name1 = mocker.MagicMock(spec=str)
mock_event_name2 = mocker.MagicMock(spec=str)
event_map = {mock_event_name1: mock_item1, mock_event_name2: mock_item2}
mock_group = mocker.MagicMock(spec=HoudiniEventGroup)
type(mock_group).event_map = mocker.PropertyMock(return_value=event_map)
mock_event1 = mocker.MagicMock(spec=HoudiniEvent)
mock_event2 = mocker.MagicMock(spec=HoudiniEvent)
events = {mock_event_name2: mock_event2}
mock_events.return_value = events
mock_create.side_effect = lambda name: events.setdefault(name, mock_event1)
manager = init_manager()
manager.register_event_group(mock_group)
mock_create.assert_called_with(mock_event_name1)
mock_event1.register_item.assert_called_with(mock_item1)
mock_event2.register_item.assert_called_with(mock_item2)
def test_register_event_group__item_lists(self, init_manager, mocker):
"""Test registering a group where no event of that name has been created."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_create = mocker.patch.object(
ht.events.manager.HoudiniEventManager, "create_event"
)
mock_item1 = mocker.MagicMock(spec=HoudiniEventItem)
mock_item2 = mocker.MagicMock(spec=HoudiniEventItem)
mock_event_name1 = mocker.MagicMock(spec=str)
mock_event_name2 = mocker.MagicMock(spec=str)
event_map = {mock_event_name1: [mock_item1], mock_event_name2: [mock_item2]}
mock_group = mocker.MagicMock(spec=HoudiniEventGroup)
type(mock_group).event_map = mocker.PropertyMock(return_value=event_map)
event_name1 = mock_event_name1
mock_event1 = mocker.MagicMock(spec=HoudiniEvent)
mock_event2 = mocker.MagicMock(spec=HoudiniEvent)
events = {mock_event_name2: mock_event2}
mock_events.return_value = events
mock_create.side_effect = lambda name: events.setdefault(name, mock_event1)
manager = init_manager()
manager.register_event_group(mock_group)
mock_create.assert_called_with(event_name1)
mock_event1.register_item.assert_called_with(mock_item1)
mock_event2.register_item.assert_called_with(mock_item2)
# register_item
def test_register_item__invalid_type(self, init_manager, mocker):
"""Test registering an invalid type."""
# Don't spec so it will fail isinstance(HoudiniEventItem)
manager = init_manager()
with pytest.raises(TypeError):
manager.register_item(None, mocker.MagicMock(spec=str))
def test_register_item__new_event(self, init_manager, mocker):
"""Test registering an item whose event does not exist yet."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_create = mocker.patch.object(
ht.events.manager.HoudiniEventManager, "create_event"
)
mock_event_name = mocker.MagicMock(spec=str)
mock_event = mocker.MagicMock(spec=HoudiniEvent)
events = {}
mock_events.return_value = events
mock_create.side_effect = lambda name: events.setdefault(name, mock_event)
mock_item = mocker.MagicMock(spec=HoudiniEventItem)
manager = init_manager()
manager.register_item(mock_item, mock_event_name)
mock_create.assert_called_with(mock_event_name)
mock_event.register_item.assert_called_with(mock_item)
def test_register_item__existing_event(self, init_manager, mocker):
"""Test registering an item to an existing event."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_create = mocker.patch.object(
ht.events.manager.HoudiniEventManager, "create_event"
)
mock_event_name = mocker.MagicMock(spec=str)
mock_event = mocker.MagicMock(spec=HoudiniEvent)
mock_events.return_value = {mock_event_name: mock_event}
mock_item = mocker.MagicMock(spec=HoudiniEventItem)
manager = init_manager()
manager.register_item(mock_item, mock_event_name)
mock_create.assert_not_called()
mock_event.register_item.assert_called_with(mock_item)
# run_event
def test_run_event__no_event(self, init_manager, mocker):
"""Test running an event where there are no matching events."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_event_name = mocker.MagicMock(spec=str)
mock_events.return_value = {}
scriptargs = {}
manager = init_manager()
manager.run_event(mock_event_name, scriptargs)
assert scriptargs == {}
def test_run_event__no_scriptargs(self, init_manager, mocker):
"""Test running an event with no particular args."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_event_name = mocker.MagicMock(spec=str)
mock_event = mocker.MagicMock(spec=HoudiniEvent)
mock_events.return_value = {mock_event_name: mock_event}
manager = init_manager()
manager.run_event(mock_event_name)
scriptargs = {"_manager_": manager}
mock_event.run.assert_called_with(scriptargs)
def test_run_event__scriptargs(self, init_manager, mocker):
"""Test running an event while passing in args."""
mock_events = mocker.patch.object(
ht.events.manager.HoudiniEventManager,
"events",
new_callable=mocker.PropertyMock,
)
mock_event_name = mocker.MagicMock(spec=str)
mock_event = mocker.MagicMock(spec=HoudiniEvent)
mock_events.return_value = {mock_event_name: mock_event}
manager = init_manager()
scriptargs = {"key": "value"}
manager.run_event(mock_event_name, scriptargs)
expected_scriptargs = {"key": "value", "_manager_": manager}
mock_event.run.assert_called_with(expected_scriptargs)
assert scriptargs == expected_scriptargs
def test_register_event_group(mocker):
"""Test ht.events.manager.register_event_group."""
mock_manager = mocker.patch("ht.events.manager.EVENT_MANAGER")
mock_group = mocker.MagicMock(spec=HoudiniEventGroup)
ht.events.manager.register_event_group(mock_group)
mock_manager.register_event_group.assert_called_with(mock_group)
class Test_register_function:
"""Test ht.events.manager.register_function."""
def test_not_callable(self, mocker):
"""Test registering a non-callable object."""
mock_event_name = mocker.MagicMock(spec=str)
mock_item_name = mocker.MagicMock(spec=str)
mock_priority = mocker.MagicMock(spec=int)
mock_tags = mocker.MagicMock(spec=list)
with pytest.raises(TypeError):
ht.events.manager.register_function(
None, mock_event_name, mock_item_name, mock_priority, mock_tags
)
def test(self, mocker):
"""Test registering a callable object."""
mock_cls = mocker.patch("ht.events.manager.HoudiniEventItem", autospec=True)
mock_register_item = mocker.patch("ht.events.manager.register_item")
mock_func = mocker.MagicMock()
mock_event_name = mocker.MagicMock(spec=str)
mock_item_name = mocker.MagicMock(spec=str)
mock_priority = mocker.MagicMock(spec=int)
mock_tags = mocker.MagicMock(spec=list)
ht.events.manager.register_function(
mock_func, mock_event_name, mock_item_name, mock_priority, mock_tags
)
mock_cls.assert_called_with(
(mock_func,), mock_item_name, mock_priority, stat_tags=mock_tags
)
mock_register_item.assert_called_with(mock_cls.return_value, mock_event_name)
class Test_register_item:
"""Test ht.events.manager.register_item."""
def test_not_item(self, mocker):
"""Test registering an invalid type."""
mock_event_name = mocker.MagicMock(spec=str)
with pytest.raises(TypeError):
ht.events.manager.register_item(None, mock_event_name)
def test(self, mocker):
"""Test registering a valid item."""
mock_manager = mocker.patch("ht.events.manager.EVENT_MANAGER")
mock_event_name = mocker.MagicMock(spec=str)
mock_item = mocker.MagicMock(spec=HoudiniEventItem)
ht.events.manager.register_item(mock_item, mock_event_name)
mock_manager.register_item.assert_called_with(mock_item, mock_event_name)
def test_run_event(mocker):
"""Test ht.events.manager.run_event."""
mock_manager = mocker.patch("ht.events.manager.EVENT_MANAGER")
mock_event_name = mocker.MagicMock(spec=str)
mock_scriptargs = mocker.MagicMock(spec=dict)
ht.events.manager.run_event(mock_event_name, mock_scriptargs)
mock_manager.run_event.assert_called_with(mock_event_name, mock_scriptargs)
| nilq/baby-python | python |
from .. cupy_utils import to_numpy, trapz, xp
from ..utils import powerlaw
import numpy as np
from astropy.cosmology import Planck15
class PowerLawRedshift(object):
"""
Redshift model from Fishbach+ https://arxiv.org/abs/1805.10270
Note that this is deliberately off by a factor of dVc/dz
"""
def __init__(self):
self.zs_ = np.linspace(1e-3, 1, 1000)
self.zs = xp.asarray(self.zs_)
self.dvc_dz_ = (
Planck15.differential_comoving_volume(self.zs_).value * 4 * np.pi)
self.dvc_dz = xp.asarray(self.dvc_dz_)
self.cached_dvc_dz = None
def __call__(self, dataset, lamb):
p_z = powerlaw(1 + dataset['redshift'], alpha=(lamb - 1),
high=(1 + self.zs_[-1]), low=1)
try:
p_z *= self.cached_dvc_dz
except (TypeError, ValueError):
self._cache_dvc_dz(dataset['redshift'])
p_z *= self.cached_dvc_dz
p_z /= self.normalisation(lamb)
return p_z
def normalisation(self, lamb):
p_z_ = powerlaw(1 + self.zs, alpha=(lamb - 1),
high=(1 + self.zs_[-1]), low=1)
norm = trapz(p_z_ * self.dvc_dz, self.zs)
return norm
def _cache_dvc_dz(self, redshifts):
self.cached_dvc_dz = xp.asarray(np.interp(
to_numpy(redshifts), self.zs_, self.dvc_dz_))
power_law_redshift = PowerLawRedshift()
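# --- Hedged editor addition: a minimal usage sketch, not part of the original
# module. It assumes the cupy_utils `xp` backend resolves to numpy on a
# CPU-only install and that `dataset` is a dict of samples keyed by parameter
# name, matching the __call__ signature above.
if __name__ == '__main__':
    demo_dataset = {'redshift': np.random.uniform(1e-2, 0.9, size=1000)}
    p_z = power_law_redshift(demo_dataset, lamb=3.0)
    print(to_numpy(p_z)[:5])  # probability density values for the first samples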
| nilq/baby-python | python |
from flask import Flask
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
with app.app_context():
import routes
import stats
if __name__ == "__main__":
app.config['DEBUG'] = True
app.run()
| nilq/baby-python | python |
from receptor_affinity.mesh import Mesh
from wait_for import TimedOutError
import time
import pytest
@pytest.yield_fixture(
scope="function",
params=[
"test/perf/flat-mesh.yaml",
"test/perf/tree-mesh.yaml",
"test/perf/random-mesh.yaml",
],
ids=["flat", "tree", "random"],
)
def mesh(request):
mesh = Mesh.load_mesh_from_file(request.param, use_diag_node=True)
try:
mesh.start(wait=True)
yield mesh
except TimedOutError:
raise
finally:
print(f"{time.time()} - Stopping current mesh")
print(mesh.nodes['controller'])
mesh.stop()
def test_pings_perf(mesh):
results = mesh.ping()
mesh.validate_ping_results(results)
| nilq/baby-python | python |
# Copyright 2021 Gakuto Furuya
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sudachipy import tokenizer
from sudachipy import dictionary
def main():
tokenizer_obj = dictionary.Dictionary().create()
mode = tokenizer.Tokenizer.SplitMode.C
while True:
sentence = input()
tokens = tokenizer_obj.tokenize(sentence, mode)
pekofied_sentence = ''
noun_flag = False
final_form_flag = False
for t in tokens:
if noun_flag:
if t.part_of_speech()[1] == '句点':
pekofied_sentence += 'ぺこ' + t.surface()
elif t.part_of_speech()[1] == '終助詞':
pekofied_sentence += 'ぺこ' + t.surface()
elif t.part_of_speech()[0] == '助動詞' and t.part_of_speech()[5] == '終止形-一般':
pekofied_sentence += 'ぺこ' + t.surface()
else:
pekofied_sentence += t.surface()
noun_flag = False
elif final_form_flag:
if t.part_of_speech()[0] == '助動詞':
pekofied_sentence += t.surface()
elif t.part_of_speech()[1] == '終助詞':
if t.dictionary_form() == 'じゃん':
pekofied_sentence += 'ぺこ' + t.surface()
else:
pekofied_sentence += t.surface()
elif t.part_of_speech()[1] == '接続助詞':
if t.dictionary_form() == 'と' or t.dictionary_form() == 'けれど':
pekofied_sentence += t.surface()
else:
pekofied_sentence += 'ぺこだ' + t.surface()
else:
pekofied_sentence += 'ぺこ' + t.surface()
final_form_flag = False
elif t.part_of_speech()[0] == '名詞':
pekofied_sentence += t.surface()
noun_flag = True
elif t.part_of_speech()[5] == '終止形-一般':
pekofied_sentence += t.surface()
final_form_flag = True
else:
pekofied_sentence += t.surface()
if noun_flag:
pekofied_sentence += 'ぺこ'
if final_form_flag:
pekofied_sentence += 'ぺこ'
print(pekofied_sentence)
if __name__ == '__main__':
main() | nilq/baby-python | python |
#!/usr/bin/env python
import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, path)
import django
def manage_16ormore():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
def manage_15orless():
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
execute_manager(settings)
if __name__ == "__main__":
if django.VERSION > (1, 6):
manage_16ormore()
else:
manage_15orless()
| nilq/baby-python | python |
#Adding python objects to database
import sqlite3
from employee import Employee
# We import the Employee class from the module created earlier; both files must be in the same directory
conn=sqlite3.connect('sql.db')
c = conn.cursor()
#c.execute("""CREATE TABLE employees (
# first text,
# last text,
# pay integer
# )""")
emp_1 = Employee('John', 'Doe', 80000)
emp_2= Employee('Jane','Doe', 80000)
c.execute("INSERT INTO employees VALUES (?,?,?)", (emp_1.first,emp_1.last,emp_1.pay))
#here we are inserting the above instances into the database
c.execute("SELECT * FROM employees WHERE last=?", ('Grasshopper',))
# Querying with the '?' positional placeholder style
print(c.fetchall())
c.execute("SELECT * FROM employees WHERE last=:last", {'last':'Doe'})
# Querying with the named (dictionary) placeholder style
print(c.fetchall())
conn.commit()
conn.close()
# Executing this script prints the rows returned by the queries above
# (check the output in the image file in this folder)
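# --- Hedged editor addition: the same kind of insert done as a batch with
# executemany. Everything below is illustration only; the table name and
# Employee attributes match the script above.
conn2 = sqlite3.connect('sql.db')
c2 = conn2.cursor()
employee_rows = [(e.first, e.last, e.pay) for e in (emp_1, emp_2)]
c2.executemany("INSERT INTO employees VALUES (?,?,?)", employee_rows)
conn2.commit()
conn2.close()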
| nilq/baby-python | python |
from XTax import Tax
import io
import unittest
import unittest.mock
class Test_XTax(unittest.TestCase):
def test_TaxInitYear(self):
MyTax = Tax(2019,autoload=False)
self.assertEqual(MyTax.Year, 2019)
@unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
def test_TaxInitLog(self,mock_stdout):
MyTax = Tax(2019,loglevel=1,autoload=False)
OutputList = mock_stdout.getvalue().split('\n')
self.assertEqual(len(OutputList), 4)
self.assertEqual(OutputList[0], "Beginning of Init")
self.assertEqual(OutputList[2], "End of Init")
if __name__ == '__main__':
unittest.main() | nilq/baby-python | python |
import sys
try:
import threading
except ImportError:
import dummy_threading as threading
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info <= (3, 0)
if py3k:
string_types = str,
import itertools
itertools_filterfalse = itertools.filterfalse
if py32:
callable = callable
else:
def callable(fn):
return hasattr(fn, '__call__')
else:
string_types = basestring,
import itertools
itertools_filterfalse = itertools.ifilterfalse
callable = callable
| nilq/baby-python | python |
import sys
import Heuristic
import RandomProblem
import SolveProblem
def main():
# auto random file if no input
if len(sys.argv) != 4:
RandomProblem.createRandomProblem('rand_in.txt', 8, 16)
pf = SolveProblem.ARA('rand_in.txt', 'rand_log.txt', 3,
Heuristic.EuclidDistance, 5)
pf.writeSolution('rand_out.txt')
else:
pf = SolveProblem.ARA(sys.argv[1], 'ARA_log.txt', 3,
Heuristic.EuclidDistance, int(sys.argv[3]))
pf.writeSolution(sys.argv[2])
if __name__ == '__main__':
main() | nilq/baby-python | python |
"""Playbook Create"""
# standard library
import base64
import json
import logging
from typing import Any, Dict, Iterable, List, Optional, Union
# third-party
from pydantic import BaseModel
# first-party
from tcex.key_value_store import KeyValueApi, KeyValueRedis
from tcex.utils.utils import Utils
# get tcex logger
logger = logging.getLogger('tcex')
class PlaybookCreate:
"""Playbook Write ABC"""
def __init__(
self,
context: str,
key_value_store: Union[KeyValueApi, KeyValueRedis],
output_variables: list,
):
"""Initialize the class properties."""
self.context = context
self.key_value_store = key_value_store
self.output_variables = output_variables
# properties
self.log = logger
self.utils = Utils()
@staticmethod
def _check_iterable(value: str, validate: bool) -> None:
"""Raise an exception if value is not an Iterable.
Validation:
- not a dict (dicts are iterable)
- not a string (strings are iterable)
- is Iterable
"""
if validate is True and (isinstance(value, (dict, str)) or not isinstance(value, Iterable)):
raise RuntimeError('Invalid data provided for KeyValueArray.')
def _check_null(self, key: str, value: Any) -> bool:
"""Return True if key or value is null."""
invalid = False
if key is None:
self.log.warning('The provided key was None.')
invalid = True
if value is None:
self.log.warning(f'The provided value for key {key} was None.')
invalid = True
return invalid
def _check_requested(self, variable: str, when_requested: bool) -> None:
"""Return True if output variable was requested by downstream app."""
if when_requested is True and not self.is_requested(variable):
self.log.debug(f'Variable {variable} was NOT requested by downstream app.')
return False
return True
def _check_variable_type(self, variable: str, type_: str) -> bool:
"""Validate the correct type was passed to the method."""
if self.utils.get_playbook_variable_type(variable).lower() != type_.lower():
raise RuntimeError(
f'Invalid variable provided ({variable}), variable must be of type {type_}.'
)
@staticmethod
def _coerce_string_value(value: Union[bool, float, int, str]) -> str:
"""Return a string value from an bool or int."""
# coerce bool before int as python says a bool is an int
if isinstance(value, bool):
# coerce bool to str type
value = str(value).lower()
# coerce int to str type
if isinstance(value, (float, int)):
value = str(value)
return value
def _create_data(self, key: str, value: Any) -> None:
"""Write data to key value store."""
self.log.debug(f'writing variable {key.strip()}')
try:
return self.key_value_store.create(self.context, key.strip(), value)
except RuntimeError as e: # pragma: no cover
self.log.error(e)
return None
def _get_variable(self, key: str, variable_type: Optional[str] = None) -> str:
"""Return properly formatted variable.
        A key can be provided as the variable key (e.g., app.output) or the
        entire variable (e.g., #App:1234:app.output!String). The full variable
        is required to create the record in the KV Store.
        If a variable_type is provided an exact match is found; if no variable
        type is known the first key match is returned. Uniqueness of keys is
        not guaranteed, but in more recent Apps it is the standard.
        If no variable is found, the variable was not requested by any
        downstream App or could possibly be formatted incorrectly.
"""
if not self.utils.is_playbook_variable(key):
# try to lookup the variable in the requested output variables.
for output_variable in self.output_variables:
variable_model = self.utils.get_playbook_variable_model(output_variable)
if variable_model.key == key and (
variable_type is None or variable_model.type == variable_type
):
# either an exact match, or first match
return output_variable
# not requested by downstream App or misconfigured
return None
# key was already a properly formatted variable
return key
@staticmethod
def _serialize_data(value: str) -> str:
"""Get the value from Redis if applicable."""
try:
return json.dumps(value)
        except (TypeError, ValueError) as e:  # pragma: no cover
raise RuntimeError(f'Invalid data provided, failed to serialize value ({e}).')
@staticmethod
def _process_object_types(
value: Union[BaseModel, dict],
validate: Optional[bool] = True,
allow_none: Optional[bool] = False,
) -> Dict[str, Any]:
"""Process object types (e.g., KeyValue, TCEntity)."""
types = (BaseModel, dict)
if allow_none is True:
types = (BaseModel, dict, type(None))
if validate and not isinstance(value, types):
raise RuntimeError(f'Invalid type provided for object type ({type(value)}).')
if isinstance(value, BaseModel):
value = value.dict(exclude_unset=True)
return value
@staticmethod
def is_key_value(data: dict) -> bool:
"""Return True if provided data has proper structure for Key Value."""
if not isinstance(data, dict):
return False
return all(x in data for x in ['key', 'value'])
def is_requested(self, variable: str) -> bool:
"""Return True if provided variable was requested by downstream App."""
return variable in self.output_variables
@staticmethod
def is_tc_entity(data: dict) -> bool:
"""Return True if provided data has proper structure for TC Entity."""
if not isinstance(data, dict):
return False
return all(x in data for x in ['id', 'value', 'type'])
def any(
self,
key: str,
value: Union[
'BaseModel', bytes, dict, str, List['BaseModel'], List[bytes], List[dict], List[str]
],
validate: Optional[bool] = True,
variable_type: Optional[str] = None,
when_requested: Optional[bool] = True,
) -> Optional[Union[bytes, dict, list, str]]:
"""Write the value to the keystore for all types.
        This is a quick helper method; for more advanced features
        the individual write methods should be used (e.g., binary).
Args:
key: The variable to write to the DB (e.g., app.colors).
value: The data to write to the DB.
variable_type: The variable type being written. Only required if not unique.
Returns:
(str): Result string of DB write.
"""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, variable_type)
if self._check_requested(variable, when_requested) is False:
return None
# get the type from the variable
variable_type = self.utils.get_playbook_variable_type(variable).lower()
# map type to create method
variable_type_map = {
'binary': self.binary,
'binaryarray': self.binary_array,
'keyvalue': self.key_value,
'keyvaluearray': self.key_value_array,
'string': self.string,
'stringarray': self.string_array,
'tcentity': self.tc_entity,
'tcentityarray': self.tc_entity_array,
# 'tcenhancedentity': self.tc_enhanced_entity_array,
}
return variable_type_map.get(variable_type, self.raw)(
variable, value, validate, when_requested
)
def binary(
self,
key: str,
value: bytes,
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'Binary')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'Binary')
# basic validation of value
if validate and not isinstance(value, bytes):
raise RuntimeError('Invalid data provided for Binary.')
# prepare value - playbook Binary fields are base64 encoded
value = base64.b64encode(value).decode('utf-8')
value = self._serialize_data(value)
return self._create_data(variable, value)
def binary_array(
self,
key: str,
value: List[bytes],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'BinaryArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'BinaryArray')
# basic validation and prep of value
value_encoded = []
for v in value:
if v is not None:
if validate and not isinstance(v, bytes):
raise RuntimeError('Invalid data provided for Binary.')
v = base64.b64encode(v).decode('utf-8')
value_encoded.append(v)
value = value_encoded
value = self._serialize_data(value)
return self._create_data(variable, value)
def key_value(
self,
key: str,
value: Union[BaseModel, dict],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'KeyValue')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'KeyValue')
# basic validation and prep of value
value = self._process_object_types(value, validate)
if validate and not self.is_key_value(value):
raise RuntimeError('Invalid data provided for KeyValueArray.')
value = self._serialize_data(value)
return self._create_data(variable, value)
def key_value_array(
self,
key: str,
value: List[Union[BaseModel, dict]],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'KeyValueArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'KeyValueArray')
# basic validation and prep of value
_value = []
for v in value:
v = self._process_object_types(v, validate, allow_none=True)
if validate and not self.is_key_value(v):
raise RuntimeError('Invalid data provided for KeyValueArray.')
_value.append(v)
value = _value
value = self._serialize_data(value)
return self._create_data(variable, value)
def string(
self,
key: str,
value: Union[bool, float, int, str],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'String')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'String')
# coerce string values
value = self._coerce_string_value(value)
# validation only needs to check str because value was coerced
if validate and not isinstance(value, str):
raise RuntimeError('Invalid data provided for String.')
value = self._serialize_data(value)
return self._create_data(variable, value)
def string_array(
self,
key: str,
value: List[Union[bool, float, int, str]],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'StringArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'StringArray')
# basic validation and prep of value
value_coerced = []
for v in value:
# coerce string values
v = self._coerce_string_value(v)
# validation only needs to check str because value was coerced
if validate and not isinstance(v, (type(None), str)):
raise RuntimeError('Invalid data provided for StringArray.')
value_coerced.append(v)
value = value_coerced
value = self._serialize_data(value)
return self._create_data(variable, value)
# pylint: disable=unused-argument
def raw(
self,
key: str,
value: Union[bytes, str, int],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> str:
"""Create method of CRUD operation for raw data.
        Raw data can only be bytes, str, or int. Other data
        structures (dict, list, etc.) must be serialized.
"""
if self._check_null(key, value):
return None
return self._create_data(key, value)
def tc_entity(
self,
key: str,
value: Union[BaseModel, dict],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
) -> Optional[int]:
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# convert key to variable if required
variable = self._get_variable(key, 'TCEntity')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'TCEntity')
# basic validation
value = self._process_object_types(value, validate)
if validate and not self.is_tc_entity(value):
raise RuntimeError('Invalid data provided for TcEntityArray.')
value = self._serialize_data(value)
return self._create_data(variable, value)
def tc_entity_array(
self,
key: str,
value: List[Union[BaseModel, dict]],
validate: Optional[bool] = True,
when_requested: Optional[bool] = True,
):
"""Create the value in Redis if applicable."""
if self._check_null(key, value) is True:
return None
# validate array type provided
self._check_iterable(value, validate)
# convert key to variable if required
variable = self._get_variable(key, 'TCEntityArray')
if self._check_requested(variable, when_requested) is False:
return None
# quick check to ensure an invalid type was not provided
self._check_variable_type(variable, 'TCEntityArray')
# basic validation and prep of value
_value = []
for v in value:
v = self._process_object_types(v, validate, allow_none=True)
if validate and not self.is_tc_entity(v):
raise RuntimeError('Invalid data provided for TcEntityArray.')
_value.append(v)
value = _value
value = self._serialize_data(value)
return self._create_data(variable, value)
def variable(
self,
key: str,
value: Union[
'BaseModel', bytes, dict, str, List['BaseModel'], List[bytes], List[dict], List[str]
],
variable_type: Optional[str] = None,
) -> str:
"""Alias for any method of CRUD operation for working with KeyValue DB.
        This method will automatically check to see if the provided variable was requested by
a downstream app and if so create the data in the KeyValue DB.
Args:
key: The variable to write to the DB (e.g., app.colors).
value: The data to write to the DB.
variable_type: The variable type being written. Only required if not unique.
Returns:
(str): Result string of DB write.
"""
if self._check_null(key, value) is True:
return None
        # short-circuit the process if there are no downstream variables requested.
if not self.output_variables: # pragma: no cover
self.log.debug(f'Variable {key} was NOT requested by downstream app.')
return None
        # key can be provided as the variable key (e.g., app.output) or
        # the entire variable (e.g., #App:1234:app.output!String). We need the
        # full variable to proceed.
variable = self._get_variable(key, variable_type)
if variable is None or variable not in self.output_variables:
self.log.debug(f'Variable {key} was NOT requested by downstream app.')
return None
# write the variable
return self.any(variable, value)
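# --- Hedged editor addition: a minimal usage sketch, not part of tcex (it
# assumes the tcex package imported above is installed). The in-memory
# stand-in below replaces a real KeyValueApi/KeyValueRedis; the only method
# PlaybookCreate calls on the store is `create(context, key, value)`, so that
# is all the stand-in implements. Variable names are illustrative.
if __name__ == '__main__':
    class _InMemoryStore:
        """Tiny stand-in for a key value store, used only for this demo."""
        def __init__(self):
            self.data = {}
        def create(self, context, key, value):
            self.data[(context, key)] = value
            return 1
    store = _InMemoryStore()
    playbook_create = PlaybookCreate(
        context='demo-context',
        key_value_store=store,
        output_variables=['#App:1234:app.output!String'],
    )
    playbook_create.string('app.output', 'hello')     # requested -> written
    playbook_create.string('app.ignored', 'dropped')  # not requested -> skipped
    print(store.data)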
| nilq/baby-python | python |
from moviepy.editor import *
clip = (VideoFileClip("../output_videos/project_video.mp4").subclip(10, 40).resize(0.3))
clip.write_gif("../output_videos/project_video.gif") | nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
admin security exceptions module.
"""
from pyrin.core.exceptions import CoreException, CoreBusinessException
from pyrin.security.exceptions import AuthorizationFailedError
class AdminSecurityException(CoreException):
"""
admin security exception.
"""
pass
class AdminSecurityBusinessException(CoreBusinessException,
AdminSecurityException):
"""
admin security business exception.
"""
pass
class AdminAccessNotAllowedError(AuthorizationFailedError,
AdminSecurityBusinessException):
"""
admin access not allowed error.
"""
pass
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
#
# Copyright (c), 2018-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
"""
XPathToken and helper functions for XPath nodes. XPath error messages and node helper functions
are embedded in XPathToken class, in order to raise errors related to token instances.
In XPath there are 7 kinds of nodes:
element, attribute, text, namespace, processing-instruction, comment, document
Element-like objects are used for representing elements and comments, ElementTree-like objects
for documents. Generic tuples are used for representing attributes and named-tuples for namespaces.
"""
from __future__ import unicode_literals
import locale
import contextlib
from decimal import Decimal
from .compat import string_base_type, unicode_type
from .exceptions import xpath_error
from .namespaces import XQT_ERRORS_NAMESPACE
from .xpath_nodes import AttributeNode, TypedAttribute, TypedElement, \
is_etree_element, is_attribute_node, elem_iter_strings, is_text_node, \
is_namespace_node, is_comment_node, is_processing_instruction_node, \
is_element_node, is_document_node, is_xpath_node, is_schema_node
from .datatypes import UntypedAtomic, Timezone, DayTimeDuration, XSD_BUILTIN_TYPES
from .schema_proxy import AbstractSchemaProxy
from .tdop_parser import Token
from .xpath_context import XPathSchemaContext
def ordinal(n):
if n in {11, 12, 13}:
return '%dth' % n
least_significant_digit = n % 10
if least_significant_digit == 1:
return '%dst' % n
elif least_significant_digit == 2:
return '%dnd' % n
elif least_significant_digit == 3:
return '%drd' % n
else:
return '%dth' % n
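# Examples (editor note): ordinal(1) -> '1st', ordinal(2) -> '2nd',
# ordinal(11) -> '11th', ordinal(23) -> '23rd'.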
class XPathToken(Token):
"""Base class for XPath tokens."""
comment = None # for XPath 2.0+ comments
xsd_type = None # fox XPath 2.0+ schema types labeling
def evaluate(self, context=None):
"""
Evaluate default method for XPath tokens.
:param context: The XPath dynamic context.
"""
return [x for x in self.select(context)]
def select(self, context=None):
"""
Select operator that generates XPath results.
:param context: The XPath dynamic context.
"""
item = self.evaluate(context)
if item is not None:
if isinstance(item, list):
for _item in item:
yield _item
else:
if context is not None:
context.item = item
yield item
def __str__(self):
symbol, label = self.symbol, self.label
if symbol == '$':
return '$%s variable reference' % (self[0].value if self else '')
elif symbol == ',':
return 'comma operator' if self.parser.version > '1.0' else 'comma symbol'
elif label == 'function':
return '%r function' % symbol
elif label == 'axis':
return '%r axis' % symbol
return super(XPathToken, self).__str__()
@property
def source(self):
symbol, label = self.symbol, self.label
if label == 'axis':
return '%s::%s' % (self.symbol, self[0].source)
elif label in ('function', 'constructor'):
return '%s(%s)' % (self.symbol, ', '.join(item.source for item in self))
elif symbol == ':':
return '%s:%s' % (self[0].source, self[1].source)
elif symbol == '(':
return '()' if not self else '(%s)' % self[0].source
elif symbol == '[':
return '%s[%s]' % (self[0].source, self[1].source)
elif symbol == ',':
return '%s, %s' % (self[0].source, self[1].source)
elif symbol == '$':
return '$%s' % self[0].source
elif symbol == '{':
return '{%s}%s' % (self[0].value, self[1].value)
elif symbol == 'instance':
return '%s instance of %s' % (self[0].source, ''.join(t.source for t in self[1:]))
elif symbol == 'treat':
return '%s treat as %s' % (self[0].source, ''.join(t.source for t in self[1:]))
return super(XPathToken, self).source
@property
def error_prefix(self):
for prefix, ns in self.parser.namespaces.items():
if ns == XQT_ERRORS_NAMESPACE:
return prefix
else:
return 'err'
###
# Helper methods
def get_argument(self, context, index=0, required=False, default_to_context=False,
default=None, cls=None):
"""
        Get the argument value of a function or constructor token. A zero length sequence is
        converted to a `None` value. If the function has no argument, returns the context's
        item if the dynamic context is not `None`.
:param context: the dynamic context.
:param index: an index for select the argument to be got, the first for default.
:param required: if set to `True` missing or empty sequence arguments are not allowed.
:param default_to_context: if set to `True` then the item of the dynamic context is \
returned when the argument is missing.
:param default: the default value returned in case the argument is an empty sequence. \
If not provided returns `None`.
:param cls: if a type is provided performs a type checking on item.
"""
try:
selector = self[index].select
except IndexError:
if default_to_context:
if context is None:
self.missing_context()
item = context.item if context.item is not None else context.root
elif required:
raise self.error('XPST0017', "Missing %s argument" % ordinal(index + 1))
else:
return
else:
item = None
for k, result in enumerate(selector(context)):
if k == 0:
item = result
elif not self.parser.compatibility_mode:
self.wrong_context_type("a sequence of more than one item is not allowed as argument")
else:
break
else:
if item is None:
if not required:
return default
ord_arg = ordinal(index + 1)
self.missing_sequence("A not empty sequence required for %s argument" % ord_arg)
# Type promotion checking (see "function conversion rules" in XPath 2.0 language definition)
if cls is not None and not isinstance(item, cls):
if self.parser.compatibility_mode:
if issubclass(cls, string_base_type):
return self.string_value(item)
elif issubclass(cls, float) or issubclass(float, cls):
return self.number_value(item)
if self.parser.version > '1.0':
value = self.data_value(item)
if isinstance(value, cls):
return value
elif isinstance(value, UntypedAtomic):
try:
if issubclass(cls, string_base_type):
return str(value)
else:
return cls(value)
except (TypeError, ValueError):
pass
code = 'XPTY0004' if self.label == 'function' else 'FORG0006'
message = "the %s argument %r is not an instance of %r"
raise self.error(code, message % (ordinal(index + 1), item, cls))
return item
def atomization(self, context=None):
"""
Helper method for value atomization of a sequence.
Ref: https://www.w3.org/TR/xpath20/#id-atomization
:param context: the XPath context.
"""
for item in self.select(context):
value = self.data_value(item)
if value is None:
raise self.error('FOTY0012', "argument node {!r} does not have a typed value".format(item))
else:
yield value
def get_atomized_operand(self, context=None):
"""
Get the atomized value for an XPath operator.
:param context: the XPath context.
:return: the atomized value of a single length sequence or `None` if the sequence is empty.
"""
selector = iter(self.atomization(context))
try:
value = next(selector)
except StopIteration:
return
else:
try:
next(selector)
except StopIteration:
if isinstance(value, UntypedAtomic):
value = str(value)
if isinstance(context, XPathSchemaContext):
return value
if self.xsd_type is not None and isinstance(value, string_base_type):
try:
value = self.xsd_type.decode(value)
except (TypeError, ValueError):
msg = "Type {!r} is not appropriate for the context"
self.wrong_context_type(msg.format(type(value)))
return value
else:
self.wrong_context_type("atomized operand is a sequence of length greater than one")
def get_comparison_data(self, context):
"""
Get comparison data couples for the general comparison of sequences. Different sequences
        may be generated with an XPath 2.0 parser, depending on the compatibility mode setting.
Ref: https://www.w3.org/TR/xpath20/#id-general-comparisons
:param context: the XPath dynamic context.
:returns: a list of data couples.
"""
if context is None:
operand1 = [x for x in self[0].select()]
operand2 = [x for x in self[1].select()]
else:
operand1 = [x for x in self[0].select(context.copy())]
operand2 = [x for x in self[1].select(context.copy())]
if self.parser.compatibility_mode:
# Boolean comparison if one of the results is a single boolean value (1.)
try:
if isinstance(operand1[0], bool):
if len(operand1) == 1:
return [(operand1[0], self.boolean_value(operand2))]
if isinstance(operand2[0], bool):
if len(operand2) == 1:
return [(self.boolean_value(operand1), operand2[0])]
except IndexError:
return []
# Converts to float for lesser-greater operators (3.)
if self.symbol in ('<', '<=', '>', '>='):
return [
(float(self.data_value(value1)), float(self.data_value(value2)))
for value1 in operand1 for value2 in operand2
]
return [(self.data_value(value1), self.data_value(value2))
for value1 in operand1 for value2 in operand2]
def select_results(self, context):
"""
Generates formatted XPath results.
:param context: the XPath dynamic context.
"""
for result in self.select(context):
if isinstance(result, TypedElement):
yield result[0]
elif isinstance(result, AttributeNode):
yield result[1]
elif isinstance(result, TypedAttribute):
yield result[0][1] if hasattr(result[0][1], 'type') else result[1]
else:
yield result
def get_results(self, context):
"""
Returns formatted XPath results.
:param context: the XPath dynamic context.
:return: a list or a simple datatype when the result is a single simple type \
generated by a literal or function token.
"""
results = [x for x in self.select_results(context)]
if len(results) == 1:
res = results[0]
if isinstance(res, (bool, int, float, Decimal)):
return res
elif isinstance(res, tuple) or is_etree_element(res) or is_document_node(res):
return results
elif is_schema_node(res):
return results
elif self.symbol in ('text', 'node'):
return results
elif self.label in ('function', 'literal'):
return res
else:
return results
else:
return results
def get_operands(self, context, cls=None):
"""
Returns the operands for a binary operator. Float arguments are converted
to decimal if the other argument is a `Decimal` instance.
:param context: the XPath dynamic context.
:param cls: if a type is provided performs a type checking on item.
:return: a couple of values representing the operands. If any operand \
is not available returns a `(None, None)` couple.
"""
arg1 = self.get_argument(context, cls=cls)
if arg1 is None:
return None, None
arg2 = self.get_argument(context, index=1, cls=cls)
if arg2 is None:
return None, None
if isinstance(arg1, Decimal) and isinstance(arg2, float):
return arg1, Decimal(arg2)
elif isinstance(arg2, Decimal) and isinstance(arg1, float):
return Decimal(arg1), arg2
return arg1, arg2
def adjust_datetime(self, context, cls):
"""
XSD datetime adjust function helper.
:param context: the XPath dynamic context.
:param cls: the XSD datetime subclass to use.
:return: an empty list if there is only one argument that is the empty sequence \
or the adjusted XSD datetime instance.
"""
if len(self) == 1:
item = self.get_argument(context, cls=cls)
if item is None:
return []
timezone = getattr(context, 'timezone', None)
else:
item = self.get_argument(context=None, cls=cls) # don't use implicit timezone
timezone = self.get_argument(context, 1, cls=DayTimeDuration)
if timezone is not None:
timezone = Timezone.fromduration(timezone)
if item.tzinfo is not None and timezone is not None:
item += timezone.offset - item.tzinfo.offset
item.tzinfo = timezone
elif item.tzinfo is None:
if timezone is not None:
item.tzinfo = timezone
elif timezone is None:
item.tzinfo = None
return item
def match_xsd_type(self, schema_item, name):
"""
Match a token with a schema type, checking the matching between the provided schema
item and name. If there is a match and the token is already related with another
schema type an exception is raised.
:param schema_item: an XPath item related with a schema instance.
:param name: a QName in extended format for matching the item.
:returns: the matched XSD type or `None` if there isn't a match.
"""
if isinstance(schema_item, AttributeNode):
if not schema_item[1].is_matching(name):
return
try:
xsd_type = schema_item[1].type
except AttributeError:
try:
xsd_type = self.parser.schema.get_attribute(name).type
except AttributeError:
return
elif is_etree_element(schema_item):
if hasattr(schema_item, 'is_matching'):
if not schema_item.is_matching(name, self.parser.default_namespace):
return
elif schema_item.tag != name:
return
try:
xsd_type = schema_item.type
except AttributeError:
try:
xsd_type = self.parser.schema.get_element(name).type
except AttributeError:
return
else:
return
if self.xsd_type is None:
self.xsd_type = xsd_type
elif self.xsd_type is not xsd_type:
self.wrong_context_type("Multiple XSD type matching during static analysis")
return xsd_type
def get_typed_node(self, context, item):
"""
Returns a typed node if the token is bound to an XSD type.
:param context: the XPath dynamic context.
        :param item: an untyped XPath attribute or element.
"""
if isinstance(self.xsd_type, (type(None), AbstractSchemaProxy)):
return item
if isinstance(context, XPathSchemaContext):
primitive_type = self.parser.schema.get_primitive_type(self.xsd_type)
try:
value = XSD_BUILTIN_TYPES[primitive_type.local_name or 'anyType'].value
except KeyError:
value = XSD_BUILTIN_TYPES['anyType'].value
if isinstance(item, AttributeNode):
return TypedAttribute(item, value)
else:
return TypedElement(item, value)
else:
try:
if isinstance(item, AttributeNode):
return TypedAttribute(item, self.xsd_type.decode(item[1]))
elif self.xsd_type.is_simple() or self.xsd_type.has_simple_content():
return TypedElement(item, self.xsd_type.decode(item.text))
else:
return item
except (TypeError, ValueError):
msg = "Type {!r} does not match sequence type of {!r}"
self.wrong_sequence_type(msg.format(self.xsd_type, item))
@contextlib.contextmanager
def use_locale(self, collation):
"""A context manager for setting a specific collation for a code block."""
locale.setlocale(locale.LC_ALL, '')
default_locale = locale.getlocale()
try:
locale.setlocale(locale.LC_ALL, collation)
except locale.Error:
raise self.error('FOCH0002', 'Unsupported collation %r' % collation)
else:
yield
finally:
locale.setlocale(locale.LC_ALL, default_locale)
###
# XPath data accessors base functions
def data_value(self, obj):
"""
The typed value, as computed by fn:data() on each item. Returns an instance of
UntypedAtomic.
"""
if is_attribute_node(obj) or isinstance(obj, TypedElement):
obj = obj[1]
if obj is None:
return
elif not is_xpath_node(obj):
return obj
elif hasattr(obj, 'type'):
return self.schema_node_value(obj) # Schema context
return UntypedAtomic(self.string_value(obj))
def boolean_value(self, obj):
"""
The effective boolean value, as computed by fn:boolean().
"""
if isinstance(obj, list):
if not obj:
return False
elif isinstance(obj[0], tuple) or is_element_node(obj[0]):
return True
elif len(obj) == 1:
return bool(obj[0])
else:
raise self.error(
code='FORG0006',
message="Effective boolean value is not defined for a sequence of two or "
"more items not starting with an XPath node.",
)
elif isinstance(obj, tuple) or is_element_node(obj):
raise self.error('FORG0006', "Effective boolean value is not defined for {}.".format(obj))
return bool(obj)
def string_value(self, obj):
"""
The string value, as computed by fn:string().
"""
if obj is None:
return ''
elif is_element_node(obj):
return ''.join(elem_iter_strings(obj))
elif is_attribute_node(obj):
return unicode_type(obj[1])
elif is_text_node(obj):
return obj
elif is_document_node(obj):
return ''.join(e.text for e in obj.getroot().iter() if e.text is not None)
elif is_namespace_node(obj):
return obj[1]
elif is_comment_node(obj):
return obj.text
elif is_processing_instruction_node(obj):
return obj.text
elif is_schema_node(obj):
return str(self.schema_node_value(obj))
else:
return str(obj)
def number_value(self, obj):
"""
The numeric value, as computed by fn:number() on each item. Returns a float value.
"""
try:
return float(self.string_value(obj) if is_xpath_node(obj) else obj)
except (TypeError, ValueError):
return float('nan')
def schema_node_value(self, obj):
"""
Returns a sample typed value for the XSD schema node, valid in the value space
of the node. Used for schema-based dynamic evaluation of XPath expressions.
"""
try:
if obj.type.is_simple() or obj.type.has_simple_content():
                # In case of a schema element or attribute use the sample value
# of the primitive type
primitive_type = self.parser.schema.get_primitive_type(obj.type)
return XSD_BUILTIN_TYPES[primitive_type.local_name].value
elif obj.type.local_name == 'anyType':
return XSD_BUILTIN_TYPES['anyType'].value
else:
return UntypedAtomic('')
except AttributeError:
raise self.wrong_type("the argument %r is not a node of an XSD schema" % obj)
###
# Error handling helpers
def error(self, code, message=None):
"""
Returns an XPath error instance related with a code. An XPath/XQuery/XSLT error code is an
alphanumeric token starting with four uppercase letters and ending with four digits.
:param code: the error code.
:param message: an optional custom additional message.
"""
return xpath_error(code, message, self, self.error_prefix)
# Shortcuts for XPath errors
def wrong_syntax(self, message=None):
if self.symbol == '::' and self.parser.token.symbol == '(name)':
self.missing_axis(message or "Axis '%s::' not found" % self.parser.token.value)
super(XPathToken, self).wrong_syntax(message)
def wrong_value(self, message=None):
raise self.error('FOCA0002', message)
def wrong_type(self, message=None):
raise self.error('FORG0006', message)
def missing_schema(self, message=None):
raise self.error('XPST0001', message)
def missing_context(self, message=None):
raise self.error('XPDY0002', message)
def wrong_context_type(self, message=None):
raise self.error('XPTY0004', message)
def missing_sequence(self, message=None):
raise self.error('XPST0005', message)
def missing_name(self, message=None):
raise self.error('XPST0008', message)
def missing_axis(self, message=None):
raise self.error('XPST0010', message)
def wrong_nargs(self, message=None):
raise self.error('XPST0017', message)
def wrong_step_result(self, message=None):
raise self.error('XPTY0018', message)
def wrong_intermediate_step_result(self, message=None):
raise self.error('XPTY0019', message)
def wrong_axis_argument(self, message=None):
raise self.error('XPTY0020', message)
def wrong_sequence_type(self, message=None):
raise self.error('XPDY0050', message)
def unknown_atomic_type(self, message=None):
raise self.error('XPST0051', message)
def wrong_target_type(self, message=None):
raise self.error('XPST0080', message)
def unknown_namespace(self, message=None):
raise self.error('XPST0081', message)
| nilq/baby-python | python |
#! /usr/bin/env python3
# Conditions:
# * A child is playing with a ball on the nth floor of a tall building
# * The height of this floor, h, is known
# * He drops the ball out of the window. The ball bounces (for example),
# to two-thirds of its height (a bounce of 0.66).
# * His mother looks out of a window 1.5 meters from the ground.
# * How many times will the mother see the ball pass in front of her
# window (including when it's falling and bouncing)?
#
# -- Three conditions must be met for a valid experiment:
# 1) Float parameter "h" in meters must be greater than 0
# 2) Float parameter "bounce" must be greater than 0 and less than 1
# 3) Float parameter "window" must be less than h.
# == If all three conditions above are fulfilled, return a positive
# integer, otherwise return -1.
#
# Note: The ball can only be seen if the height of the rebounding ball
# is strictly greater than the window parameter.
#
# Example:
# 1) h = 3, bounce = 0.66, window = 1.5, result is 3
# 2) h = 3, bounce = 1, window = 1.5, result is -1 (*)
# (*) Condition 2 not fulfilled.
#
def bouncing_ball(h, bounce, window):
if h <= 0 or not (0 < bounce < 1) or window >= h:
return -1
count = 0
while h > window:
        count += 2 if count % 2 == 1 else 1  # 1 for the initial fall, then 2 per bounce (up and down)
h *= bounce
return count
def test_bouncing_ball():
assert -1 == bouncing_ball(-1, 0, 0)
assert -1 == bouncing_ball(0, 0, 0)
assert -1 == bouncing_ball(1, -1, 0)
assert -1 == bouncing_ball(1, 0, 0)
assert -1 == bouncing_ball(1, 1, 0)
assert -1 == bouncing_ball(1, 2, 0)
assert -1 == bouncing_ball(1, .5, 1)
assert -1 == bouncing_ball(1, .5, 2)
assert 3 == bouncing_ball(3, 0.66, 1.5)
assert -1 == bouncing_ball(3, 1, 1.5)
if __name__ == '__main__':
test_bouncing_ball()
| nilq/baby-python | python |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lbrynet/schema/proto/source.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lbrynet/schema/proto/source.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n!lbrynet/schema/proto/source.proto\"\xde\x01\n\x06Source\x12 \n\x07version\x18\x01 \x02(\x0e\x32\x0f.Source.Version\x12\'\n\nsourceType\x18\x02 \x02(\x0e\x32\x13.Source.SourceTypes\x12\x0e\n\x06source\x18\x03 \x02(\x0c\x12\x13\n\x0b\x63ontentType\x18\x04 \x02(\t\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01\"8\n\x0bSourceTypes\x12\x17\n\x13UNKNOWN_SOURCE_TYPE\x10\x00\x12\x10\n\x0clbry_sd_hash\x10\x01')
)
_SOURCE_VERSION = _descriptor.EnumDescriptor(
name='Version',
full_name='Source.Version',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_VERSION', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='_0_0_1', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=160,
serialized_end=202,
)
_sym_db.RegisterEnumDescriptor(_SOURCE_VERSION)
_SOURCE_SOURCETYPES = _descriptor.EnumDescriptor(
name='SourceTypes',
full_name='Source.SourceTypes',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_SOURCE_TYPE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lbry_sd_hash', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=204,
serialized_end=260,
)
_sym_db.RegisterEnumDescriptor(_SOURCE_SOURCETYPES)
_SOURCE = _descriptor.Descriptor(
name='Source',
full_name='Source',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='Source.version', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sourceType', full_name='Source.sourceType', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='Source.source', index=2,
number=3, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contentType', full_name='Source.contentType', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOURCE_VERSION,
_SOURCE_SOURCETYPES,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=260,
)
_SOURCE.fields_by_name['version'].enum_type = _SOURCE_VERSION
_SOURCE.fields_by_name['sourceType'].enum_type = _SOURCE_SOURCETYPES
_SOURCE_VERSION.containing_type = _SOURCE
_SOURCE_SOURCETYPES.containing_type = _SOURCE
DESCRIPTOR.message_types_by_name['Source'] = _SOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), dict(
DESCRIPTOR = _SOURCE,
__module__ = 'lbrynet.schema.proto.source_pb2'
# @@protoc_insertion_point(class_scope:Source)
))
_sym_db.RegisterMessage(Source)
# @@protoc_insertion_point(module_scope)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 9 23:58:12 2021
@author: AKayal
"""
from collections import namedtuple
from typing import List, NamedTuple
import datetime
from datetime import date
class personal_details(NamedTuple):
"""
Using the typing module, we can be even more explicit about our data structures.
https://realpython.com/python-namedtuple/
"""
ssn: str
first_name: str
last_name: str
gender: str
language: str
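# Illustrative usage sketch (not part of the original file): constructing an instance of the
# NamedTuple above. The field values below are made up for demonstration only.
if __name__ == '__main__':
    person = personal_details(ssn='123-45-6789', first_name='Ada', last_name='Lovelace',
                              gender='F', language='English')
    print(person.first_name, person._asdict())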
| nilq/baby-python | python |
from whirlwind.store import create_task
from delfick_project.norms import sb, dictobj, Meta
from tornado.web import RequestHandler, HTTPError
from tornado import websocket
import binascii
import logging
import asyncio
import json
import uuid
log = logging.getLogger("whirlwind.request_handlers.base")
class Finished(Exception):
def __init__(self, status=500, **kwargs):
self.kwargs = kwargs
self.kwargs["status"] = status
def as_dict(self):
return self.kwargs
def reprer(o):
if type(o) is bytes:
return binascii.hexlify(o).decode()
return repr(o)
class MessageFromExc:
def __init__(self, *, log_exceptions=True, see_exception=None):
self.see_exception = see_exception
self.log_exceptions = log_exceptions
def __call__(self, exc_type, exc, tb):
if isinstance(exc, Finished):
return exc.kwargs
else:
return self.process(exc_type, exc, tb)
def process(self, exc_type, exc, tb):
if self.see_exception:
self.see_exception(exc_type, exc, tb)
if exc_type is asyncio.CancelledError:
return {
"status": 500,
"error": "Request was cancelled",
"error_code": "RequestCancelled",
}
else:
if self.see_exception is None and self.log_exceptions:
log.error(exc, exc_info=(exc_type, exc, tb))
return {
"status": 500,
"error": "Internal Server Error",
"error_code": "InternalServerError",
}
class AsyncCatcher(object):
def __init__(self, request, info, final=None):
self.info = info
self.final = final
self.request = request
async def __aenter__(self):
pass
async def __aexit__(self, exc_type, exc, tb):
if exc is None:
self.complete(self.info.get("result"), status=200)
return
msg = self.request.message_from_exc(exc_type, exc, tb)
self.complete(msg, status=500, exc_info=(exc_type, exc, tb))
# And don't reraise the exception
return True
def send_msg(self, msg, status=200, exc_info=None):
if self.request._finished and not hasattr(self.request, "ws_connection"):
if type(msg) is dict:
msg = json.dumps(msg, default=self.request.reprer, sort_keys=True, indent=" ")
self.request.hook("request_already_finished", msg)
return
if hasattr(msg, "exc_info") and exc_info is None:
exc_info = msg.exc_info
if self.final is None:
self.request.send_msg(msg, status, exc_info=exc_info)
else:
self.final(msg, exc_info=exc_info)
def complete(self, msg, status=sb.NotSpecified, exc_info=None):
if type(msg) is dict:
result = json.loads(json.dumps(msg, default=self.request.reprer, indent=" "))
else:
result = msg
self.send_msg(result, status=status, exc_info=exc_info)
class RequestsMixin:
"""
A mixin class you may use for your handler which provides some handy methods
for dealing with data
"""
_merged_options_formattable = True
def hook(self, func, *args, **kwargs):
if hasattr(self, func):
return getattr(self, func)(*args, **kwargs)
# def process_reply(self, msg, exc_info=None):
# """A hook that provides the msg sent as reply or progress"""
# pass
# def request_already_finished(self, msg):
# """Hook for when we would send a message to an already closed websocket"""
@property
def reprer(self):
if not hasattr(self, "_reprer"):
self._reprer = reprer
return self._reprer
@reprer.setter
def reprer(self, value):
self._reprer = value
@property
def message_from_exc(self):
if not hasattr(self, "_message_from_exc"):
self._message_from_exc = MessageFromExc(
see_exception=self.see_returned_exception,
log_exceptions=getattr(self, "log_exceptions", True),
)
return self._message_from_exc
def see_returned_exception(self, exc_typ, exc, tb):
if getattr(self, "log_exceptions", True):
log.error(exc, exc_info=(exc_typ, exc, tb))
@message_from_exc.setter
def message_from_exc(self, value):
self._message_from_exc = value
def async_catcher(self, info, final=None):
return AsyncCatcher(self, info, final=final)
def body_as_json(self, body=None):
"""
Return the body of the request as a json object
If there is a special ``__body__`` file in the request, we will consider this
to be the body instead of the request body
"""
if body is None:
if "__body__" in self.request.files:
body = self.request.files["__body__"][0]["body"].decode()
else:
body = self.request.body.decode()
try:
if type(body) is str:
body = json.loads(body)
except (TypeError, ValueError) as error:
self.log_json_error(body, error)
raise Finished(status=400, reason="Failed to load body as json", error=error)
return body
def log_json_error(self, body, error):
"""
Do any logging for errors parsing the request body as json
"""
log.error("Failed to load body as json\t%s", body)
def send_msg(self, msg, status=sb.NotSpecified, exc_info=None):
"""
This determines what content-type and exact body to write to the response
If ``msg`` has ``as_dict``, we call it.
If ``msg`` is a dictionary and has status, we use that as the status of
the request, otherwise we say it's a 200.
If there is ``html`` in ``msg``, we use that as the body of the request.
If ``msg`` is None, we close without a body.
* If ``msg`` is a ``dict`` or ``list``, we write it as a json object.
* If ``msg`` starts with ``<html>`` or ``<!DOCTYPE html>`` we treat it
as html content
* Otherwise we write ``msg`` as ``text/plain``
"""
if hasattr(msg, "exc_info") and exc_info is None:
exc_info = msg.exc_info
if hasattr(msg, "as_dict"):
msg = msg.as_dict()
self.hook("process_reply", msg, exc_info=exc_info)
if type(msg) is dict and "status" in msg:
status = msg["status"]
elif exc_info and exc_info[1]:
if hasattr(exc_info[1], "status"):
status = exc_info[1].status
else:
status = 500
if status is sb.NotSpecified:
status = 200
self.set_status(status)
if type(msg) is dict and "html" in msg:
msg = msg["html"]
if msg is None:
self.finish()
return
if type(msg) in (dict, list):
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(json.dumps(msg, default=self.reprer, sort_keys=True, indent=" "))
elif msg.lstrip().startswith("<html>") or msg.lstrip().startswith("<!DOCTYPE html>"):
self.write(msg)
else:
self.set_header("Content-Type", "text/plain; charset=UTF-8")
self.write(msg)
self.finish()
class Simple(RequestsMixin, RequestHandler):
"""
Helper for using ``self.async_catcher`` from ``RequestsMixin`` for most HTTP verbs.
.. code-block:: python
class MyRequestHandler(Simple):
            async def do_get(self):
return "<html><body><p>lol</p></body></html>"
Essentially you define ``async def do_<verb>(self)`` methods for each verb
you want to support.
This supports
* get
* put
* post
* patch
* delete
"""
log_exceptions = True
async def get(self, *args, **kwargs):
if not hasattr(self, "do_get"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_get(*args, **kwargs)
async def put(self, *args, **kwargs):
if not hasattr(self, "do_put"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_put(*args, **kwargs)
async def post(self, *args, **kwargs):
if not hasattr(self, "do_post"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_post(*args, **kwargs)
async def patch(self, *args, **kwargs):
if not hasattr(self, "do_patch"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_patch(*args, **kwargs)
async def delete(self, *args, **kwargs):
if not hasattr(self, "do_delete"):
raise HTTPError(405)
info = {"result": None}
async with self.async_catcher(info):
info["result"] = await self.do_delete(*args, **kwargs)
json_spec = sb.match_spec(
(bool, sb.any_spec()),
(int, sb.any_spec()),
(float, sb.any_spec()),
(str, sb.any_spec()),
(list, lambda: sb.listof(json_spec)),
(type(None), sb.any_spec()),
fallback=lambda: sb.dictof(sb.string_spec(), json_spec),
)
class SimpleWebSocketBase(RequestsMixin, websocket.WebSocketHandler):
"""
Used for websocket handlers
Implement ``process_message``
.. automethod:: whirlwind.request_handlers.base.SimpleWebSocketBase.process_message
    This class takes in messages of the form ``{"path": <string>, "message_id": <string>, "body": <dictionary>}``
    It will respond with messages of the form ``{"reply": <reply>, "message_id": <message_id>}``
    It treats path of ``__tick__`` as special and responds with ``{"reply": {"ok": "thankyou"}, "message_id": "__tick__"}``
It relies on the client side closing the connection when it's finished.
"""
log_exceptions = True
def initialize(self, final_future, server_time, wsconnections):
self.server_time = server_time
self.final_future = final_future
self.wsconnections = wsconnections
class WSMessage(dictobj.Spec):
path = dictobj.Field(sb.string_spec, wrapper=sb.required)
message_id = dictobj.Field(
sb.or_spec(sb.string_spec(), sb.tupleof(sb.string_spec())), wrapper=sb.required
)
body = dictobj.Field(json_spec, wrapper=sb.required)
message_spec = WSMessage.FieldSpec()
class Closing(object):
pass
def open(self):
self.key = str(uuid.uuid1())
self.connection_future = asyncio.Future()
if self.final_future.done():
self.connection_future.cancel()
return
canceller = lambda res: self.connection_future.cancel()
self.final_future.add_done_callback(canceller)
self.connection_future.add_done_callback(
lambda res: self.final_future.remove_done_callback(canceller)
)
if self.server_time is not None:
self.reply(self.server_time, message_id="__server_time__")
self.hook("websocket_opened")
def reply(self, msg, message_id=None, exc_info=None):
if msg is None:
msg = {"done": True}
# I bypass tornado converting the dictionary so that non jsonable things can be repr'd
if hasattr(msg, "as_dict"):
msg = msg.as_dict()
reply = {"reply": msg, "message_id": message_id}
reply = json.dumps(reply, default=self.reprer).replace("</", "<\\/")
if message_id not in ("__tick__", "__server_time__"):
self.hook("process_reply", msg, exc_info=exc_info)
if self.ws_connection:
self.write_message(reply)
def on_message(self, message):
self.hook("websocket_message", message)
try:
parsed = json.loads(message)
except (TypeError, ValueError) as error:
self.reply({"error": "Message wasn't valid json\t{0}".format(str(error))})
return
if type(parsed) is dict and "path" in parsed and parsed["path"] == "__tick__":
parsed["message_id"] = "__tick__"
parsed["body"] = "__tick__"
try:
msg = self.message_spec.normalise(Meta.empty(), parsed)
except Exception as error:
self.hook("websocket_invalid_message", error, parsed)
if hasattr(error, "as_dict"):
error = error.as_dict()
else:
error = str(error)
self.reply({"error_code": "InvalidMessage", "error": error})
else:
path = msg.path
body = msg.body
message_id = msg.message_id
message_key = str(uuid.uuid4())
if path == "__tick__":
self.reply({"ok": "thankyou"}, message_id=message_id)
return
def on_processed(final, exc_info=None):
if final is self.Closing:
self.reply({"closing": "goodbye"}, message_id=message_id)
self.close()
else:
self.reply(final, message_id=message_id, exc_info=exc_info)
try:
self.message_done(msg, final, message_key, exc_info=exc_info)
except Exception as error:
self.handle_message_done_error(error, msg, final, message_key, exc_info)
async def doit():
info = {}
def progress_cb(progress, **kwargs):
for m in self.transform_progress(msg, progress, **kwargs):
self.reply(m, message_id=message_id)
async with self.async_catcher(info, on_processed):
result = await self.process_message(
path, body, message_id, message_key, progress_cb
)
if isinstance(result, asyncio.Future) or hasattr(result, "__await__"):
result = await result
info["result"] = result
def done(res):
if message_key in self.wsconnections:
del self.wsconnections[message_key]
if not res.cancelled():
self.handle_request_done_exception(res.exception())
t = create_task(doit(), name=f"<process_command: {body}>")
t.add_done_callback(done)
self.wsconnections[message_key] = t
def message_done(self, request, final, message_key, exc_info=None):
"""
Hook for when we have finished processing a request
By default nothing is done.
request
The original request
final
The last response to be sent back.
message_key
The uuid the server generated for this request
exc_info
The (exc_type, exc, traceback) for any exception that stopped the processing of the request
"""
def handle_message_done_error(self, error, msg, final, message_key, exc_info):
"""
Hook for when message_done raised an exception
By default we ``log.exception(error)``
error
The exception that was raised
        msg
            The original request
final
The last response to be sent back.
message_key
The uuid the server generated for this request
exc_info
The (exc_type, exc, traceback) for any exception that stopped the processing of the request
before message_done had been called
"""
log.exception(error)
def handle_request_done_exception(self, error):
"""Hook that takes in an exception from the entire request"""
if error and self.log_exceptions:
log.exception(error, exc_info=(type(error), error, error.__traceback__))
def transform_progress(self, body, progress, **kwargs):
"""
Hook for transforming progress messages. This must be a generator that yields 0 or more messages
So when the ``progress_cb`` is called like ``progress_cb("some message", arg=1)`` we will do:
.. code-block:: python
for m in self.transform_progress(<request>, "some message", arg=1):
# write ``{"reply": m, "message_id": <message_id>}``
where ``<request>`` is the entire message that started this stream.
By default kwargs are ignored and we just yield ``{"progress": progress}`` once
"""
yield {"progress": progress}
async def process_message(self, path, body, message_id, message_key, progress_cb):
"""
Return the response to be sent back when we get a message from the conn.
path
The uri specified in the message
body
The body specified in the message
message_id
The unique message_id for this stream of requests as supplied in the request
message_key
A unique id for this stream created by the server
progress_cb
            A callback that will send a message of the form ``{"progress": <progress>, "message_id": <message_id>}``
where ``<progress>`` is the argument passed into the callback
"""
raise NotImplementedError
def on_close(self):
"""Hook for when a websocket connection closes"""
self.connection_future.cancel()
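# Illustrative sketch (not part of the original module): a minimal subclass showing how
# ``process_message`` is intended to be implemented; the class name ``EchoWS`` and its
# behaviour are assumptions added for demonstration only.
class EchoWS(SimpleWebSocketBase):
    async def process_message(self, path, body, message_id, message_key, progress_cb):
        # Send one progress message, then reply with the body that was received.
        progress_cb("echoing {0}".format(path))
        return {"path": path, "echo": body}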
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 14:33:38 2017
@author: paul
"""
from weatherTLKT import Weather
typ='ens'
for ss in range(1,9):
if typ=='solo':
mydate='20171127'
website='http://nomads.ncep.noaa.gov:9090/dods'
model='gfs'
resolution='0p25'
url=website+'/'+model+'_'+resolution+'/'+model+mydate+'/'+model+'_'+resolution+'_00z'
pathToSaveObj='../data/'+ model+mydate+'_'+resolution
else :
mydate='20171127'
website='http://nomads.ncep.noaa.gov:9090/dods'
model='gens'
resolution='0p25'
num_scenario='0'+str(ss)
url=website+'/'+model+'/'+model+mydate+'/'+'gep'+num_scenario+'_00z'
pathToSaveObj='../data/'+ model+mydate+'_'+num_scenario
latBound=[43,50]
lonBound=[-10+360, 360]
Weather.download(url,pathToSaveObj,latBound=latBound,lonBound=lonBound,timeSteps=[0,85],ens=True)
| nilq/baby-python | python |
from django.views.generic import TemplateView, ListView, DetailView
from . import models
class DashboardView(TemplateView):
template_name = "organizations/dashboard.html"
class OrganizationDetailView(DetailView):
template_name = "organizations/organization_details.html"
model = models.Organization
class OrganizationListView(ListView):
template_name = "organizations/organization_list.html"
model = models.Organization
class OrganizationalUnitDetailView(DetailView):
template_name = "organizations/organizational_unit_details.html"
model = models.OrganizationalUnit
class OrganizationalUnitListView(ListView):
template_name = "organizations/organizational_unit_list.html"
model = models.OrganizationalUnit
| nilq/baby-python | python |
import csv
import xlsxwriter
import datetime
# Sequence Analysis Data Object
# Holds all items needed for analysis
class SeqData:
its_dict = None
seq_config = None
num_threads = None
output_format = None
def __init__(self, its_dict, seq_config, num_threads, output_format):
self.num_threads = num_threads
self.its_dict = its_dict
self.seq_config = seq_config
self.output_format = output_format
# Sequence Analysis Run Object
# Put into queue; used in Perform()
class SeqRun:
p_id = None
path = None
def __init__(self, p_id, path):
self.p_id = p_id
self.path = path
# Output Object
# sent to output functions
class OutData:
batch_store = None
seq_config = None
results = None
def __init__(self, batch_store, seq_config, results):
self.batch_store = batch_store
self.seq_config = seq_config
self.results = results
# Output to CSV format
def output_csv(out_data):
"This method outputs the analysis results to a .csv file"
# output code
print("Output in .csv")
# create + write csv file
out_file = out_data.seq_config['outputDirPath'] +'//'+ "LC2-"+out_data.batch_store+"-"+out_data.seq_config['seqType']+"-"+str(out_data.seq_config['PauseDur']).replace('.','p')+"-"+str(out_data.seq_config['roundingEnabled'])+"-"+datetime.datetime.now().strftime('%m%d%y-%H%M')+".csv"
with open( out_file, 'wb') as f:#open csv file to be written in
csv_writer = csv.writer(f, delimiter = ',')
for line in out_data.results:#loop to write rows to csv file
line = line.split(',')
csv_writer.writerow(line)
# Output to TXT format
def output_txt(out_data):
"This method outputs the analysis results to a .txt file"
# output code
print("Output in .txt")
# create + write txt file
out_file = out_data.seq_config['outputDirPath'] +'//'+ "LC2-"+out_data.batch_store+"-"+out_data.seq_config['seqType']+"-"+str(out_data.seq_config['PauseDur']).replace('.','p')+"-"+str(out_data.seq_config['roundingEnabled'])+"-"+datetime.datetime.now().strftime('%m%d%y-%H%M')+".txt"
with open(out_file,'w') as f:
for line in out_data.results:
f.writelines(line+"\n")
# Output to Excel format
def output_xlsx(out_data):
"This method outputs the analysis results to a .xlsx file"
print("Output in .xlsx")
# create workbook & add sheet
out_file = out_data.seq_config['outputDirPath'] +'//'+ "LC2-"+out_data.batch_store+"-"+out_data.seq_config['seqType']+"-"+str(out_data.seq_config['PauseDur']).replace('.','p')+"-"+str(out_data.seq_config['roundingEnabled'])+"-"+datetime.datetime.now().strftime('%m%d%y-%H%M')+".xlsx"
workbook = xlsxwriter.Workbook(out_file)
worksheet = workbook.add_worksheet()
# start from first cell
row = 0
# insert into worksheet
for line in out_data.results:
col = 0
for cell in str(line).split(","):
worksheet.write(row, col, cell)
col += 1
row += 1
# close file
workbook.close() | nilq/baby-python | python |
import pandas as pd
from strategy.astrategy import AStrategy
from processor.processor import Processor as p
from datetime import timedelta
import pytz
from tqdm import tqdm
from time import sleep
pd.options.mode.chained_assignment = None
class ProgressReport(AStrategy):
def __init__(self,start_date,end_date,modeling_params={},trading_params={"value":True,"requirement":5}):
super().__init__(f"progress_report",
start_date,
end_date,
{"market":{}},modeling_params=modeling_params,trading_params=trading_params)
self.exit_days = 45
self.last_call_day = 90
@classmethod
def required_params(self):
required = {"timeframe":"quarterly"
,"requirement":5}
return required
def create_sim(self):
if self.simmed:
self.db.connect()
sim = self.db.retrieve("sim")
self.db.disconnect()
else:
start_year = self.start_date.year
end_year = self.end_date.year
market = self.subscriptions["market"]["db"]
market.connect()
self.db.connect()
tickers = market.retrieve_tickers("prices")
sim = []
for ticker in tqdm(tickers["ticker"].unique(),desc=f"{self.name}_sim"):
prices = market.retrieve_ticker_prices("prices",ticker)
prices = p.column_date_processing(prices)
prices["year"] = [x.year for x in prices["date"]]
prices["quarter"] = [x.quarter for x in prices["date"]]
for year in range(start_year,end_year):
for quarter in range(1,5):
try:
ticker_data = prices[(prices["year"]==year) & (prices["quarter"]==quarter)].sort_values("date")
sp = ticker_data.iloc[0]["adjclose"].item()
ticker_data["quarter_start"] = sp
ticker_data["delta"] = (ticker_data["adjclose"] - sp) / sp
ticker_data = ticker_data[["date","adjclose","delta","ticker"]]
for param in self.modeling_params:
ticker_data[param]=self.modeling_params[param]
sim.append(ticker_data)
self.db.store("sim",ticker_data)
except Exception as e:
continue
sim = pd.concat(sim)
self.db.disconnect()
market.disconnect()
self.simmed = True
return sim
def create_rec(self,date):
self.db.connect()
rec = self.db.query("rec",self.modeling_params)
self.db.disconnect()
if rec.index.size > 1:
rec = p.column_date_processing(rec)
small_rec = rec[rec["date"]>=date]
if small_rec.index.size > 1:
return small_rec
else:
year = date.year
month = date.month
quarter = int((month-1)/3) + 1
market = self.subscriptions["market"]["db"]
market.connect()
self.db.connect()
tickers = market.retrieve_tickers("prices")
sim = []
for ticker in tqdm(tickers["ticker"].unique(),desc=f"{self.name}_sim"):
prices = market.retrieve_ticker_prices("prices",ticker)
prices = p.column_date_processing(prices)
prices["year"] = [x.year for x in prices["date"]]
prices["quarter"] = [x.quarter for x in prices["date"]]
try:
ticker_data = prices[(prices["year"]==year) & (prices["quarter"]==quarter)].sort_values("date")
sp = ticker_data.iloc[0]["adjclose"].item()
ticker_data["quarter_start"] = sp
ticker_data["delta"] = (ticker_data["adjclose"] - sp) / sp
ticker_data = ticker_data[["date","adjclose","delta","ticker"]]
for param in self.modeling_params:
ticker_data[param]=self.modeling_params[param]
sim.append(ticker_data.tail(1))
self.db.store("rec",ticker_data.tail(1))
except Exception as e:
continue
recs = pd.concat(sim)
self.db.disconnect()
market.disconnect()
return recs | nilq/baby-python | python |
from functools import reduce
from operator import mul
import numpy as onp
from numpy.testing import assert_allclose
import pytest
import scipy.stats as osp_stats
import jax
from jax import grad, lax, random
import jax.numpy as np
from jax.scipy.special import logit
import numpyro.contrib.distributions as dist
from numpyro.contrib.distributions import jax_multivariate, validation_enabled
from numpyro.distributions import constraints
def idfn(param):
if isinstance(param, (osp_stats._distn_infrastructure.rv_generic,
osp_stats._multivariate.multi_rv_generic)):
return param.name
elif isinstance(param, constraints.Constraint):
return param.__class__.__name__
return repr(param)
@pytest.mark.parametrize('jax_dist', [
dist.beta,
dist.cauchy,
dist.expon,
dist.gamma,
dist.halfcauchy,
dist.halfnorm,
dist.lognorm,
dist.pareto,
dist.trunccauchy,
dist.truncnorm,
dist.norm,
dist.t,
dist.uniform,
], ids=idfn)
@pytest.mark.parametrize('loc, scale', [
(1, 1),
(1., np.array([1., 2.])),
])
@pytest.mark.parametrize('prepend_shape', [
None,
(),
(2,),
(2, 3),
])
def test_continuous_shape(jax_dist, loc, scale, prepend_shape):
rng = random.PRNGKey(0)
args = [i + 1 for i in range(jax_dist.numargs)]
expected_shape = lax.broadcast_shapes(*[np.shape(loc), np.shape(scale)])
samples = jax_dist.rvs(*args, loc=loc, scale=scale, random_state=rng)
assert isinstance(samples, jax.interpreters.xla.DeviceArray)
assert np.shape(samples) == expected_shape
assert np.shape(jax_dist(*args, loc=loc, scale=scale).rvs(random_state=rng)) == expected_shape
if prepend_shape is not None:
expected_shape = prepend_shape + lax.broadcast_shapes(*[np.shape(loc), np.shape(scale)])
assert np.shape(jax_dist.rvs(*args, loc=loc, scale=scale,
size=expected_shape, random_state=rng)) == expected_shape
assert np.shape(jax_dist(*args, loc=loc, scale=scale)
.rvs(random_state=rng, size=expected_shape)) == expected_shape
@pytest.mark.parametrize('jax_dist, dist_args, sample', [
(dist.beta, (-1, 1), -1),
(dist.beta, (2, np.array([1., -3])), np.array([1., -2])),
(dist.cauchy, (), np.inf),
(dist.cauchy, (), np.array([1., np.nan])),
(dist.expon, (), -1),
(dist.expon, (), np.array([1., -2])),
(dist.gamma, (-1,), -1),
(dist.gamma, (np.array([-2., 3]),), np.array([1., -2])),
(dist.halfcauchy, (), -1),
(dist.halfcauchy, (), np.array([1., -2])),
(dist.halfnorm, (), -1),
(dist.halfnorm, (), np.array([1., -2])),
(dist.lognorm, (-1,), -1),
(dist.lognorm, (np.array([-2., 3]),), np.array([1., -2])),
(dist.norm, (), np.inf),
(dist.norm, (), np.array([1., np.nan])),
(dist.pareto, (-1,), -1),
(dist.pareto, (np.array([-2., 3]),), np.array([1., -2])),
(dist.t, (-1,), np.inf),
(dist.t, (np.array([-2., 3]),), np.array([1., np.nan])),
(dist.trunccauchy, (), -1),
(dist.trunccauchy, (), np.array([1., -2])),
(dist.truncnorm, (), -1),
(dist.truncnorm, (), np.array([1., -2])),
(dist.uniform, (), -1),
(dist.uniform, (), np.array([0.5, -2])),
], ids=idfn)
def test_continuous_validate_args(jax_dist, dist_args, sample):
valid_args = [i + 1 for i in range(jax_dist.numargs)]
with validation_enabled():
if dist_args:
with pytest.raises(ValueError, match='Invalid parameters'):
jax_dist(*dist_args)
with pytest.raises(ValueError, match='Invalid scale parameter'):
jax_dist(*valid_args, scale=-1)
frozen_dist = jax_dist(*valid_args)
with pytest.raises(ValueError, match='Invalid values'):
frozen_dist.logpdf(sample)
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.categorical, (np.array([0.1, 0.9]),)),
(dist.categorical, (np.array([[0.1, 0.9], [0.2, 0.8]]),)),
(dist.dirichlet, (np.ones(3),)),
(dist.dirichlet, (np.ones((2, 3)),)),
(dist.multinomial, (10, np.array([0.1, 0.9]),)),
(dist.multinomial, (10, np.array([[0.1, 0.9], [0.2, 0.8]]),)),
], ids=idfn)
@pytest.mark.parametrize('prepend_shape', [
None,
(),
(2,),
(2, 3),
])
def test_multivariate_shape(jax_dist, dist_args, prepend_shape):
rng = random.PRNGKey(0)
expected_shape = jax_dist._batch_shape(*dist_args) + jax_dist._event_shape(*dist_args)
samples = jax_dist.rvs(*dist_args, random_state=rng)
assert isinstance(samples, jax.interpreters.xla.DeviceArray)
assert np.shape(samples) == expected_shape
assert np.shape(jax_dist(*dist_args).rvs(random_state=rng)) == expected_shape
if prepend_shape is not None:
size = prepend_shape + jax_dist._batch_shape(*dist_args)
expected_shape = size + jax_dist._event_shape(*dist_args)
samples = jax_dist.rvs(*dist_args, size=size, random_state=rng)
assert np.shape(samples) == expected_shape
samples = jax_dist(*dist_args).rvs(random_state=rng, size=size)
assert np.shape(samples) == expected_shape
@pytest.mark.parametrize('jax_dist, valid_args, invalid_args, invalid_sample', [
(dist.categorical, (np.array([0.1, 0.9]),), (np.array([0.1, 0.8]),), np.array([1, 4])),
(dist.dirichlet, (np.ones(3),), (np.array([-1., 2., 3.]),), np.array([0.1, 0.7, 0.1])),
(dist.multinomial, (10, np.array([0.1, 0.9]),), (10, np.array([0.2, 0.9]),), np.array([-1, 9])),
], ids=idfn)
def test_multivariate_validate_args(jax_dist, valid_args, invalid_args, invalid_sample):
with validation_enabled():
with pytest.raises(ValueError, match='Invalid parameters'):
jax_dist(*invalid_args)
frozen_dist = jax_dist(*valid_args)
with pytest.raises(ValueError, match='Invalid values'):
frozen_dist.logpmf(invalid_sample)
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.bernoulli, (0.1,)),
(dist.bernoulli, (np.array([0.3, 0.5]),)),
(dist.binom, (10, 0.4)),
(dist.binom, (np.array([10]), np.array([0.4, 0.3]))),
(dist.poisson, (1.,)),
(dist.poisson, (np.array([1., 4., 10.]),)),
], ids=idfn)
@pytest.mark.parametrize('prepend_shape', [
None,
(),
(2,),
(2, 3),
])
def test_discrete_shape(jax_dist, dist_args, prepend_shape):
rng = random.PRNGKey(0)
sp_dist = getattr(osp_stats, jax_dist.name)
expected_shape = np.shape(sp_dist.rvs(*dist_args))
samples = jax_dist.rvs(*dist_args, random_state=rng)
assert isinstance(samples, jax.interpreters.xla.DeviceArray)
assert np.shape(samples) == expected_shape
if prepend_shape is not None:
shape = prepend_shape + lax.broadcast_shapes(*[np.shape(arg) for arg in dist_args])
expected_shape = np.shape(sp_dist.rvs(*dist_args, size=shape))
assert np.shape(jax_dist.rvs(*dist_args, size=shape, random_state=rng)) == expected_shape
@pytest.mark.parametrize('jax_dist, valid_args, invalid_args, invalid_sample', [
(dist.bernoulli, (0.8,), (np.nan,), 2),
(dist.binom, (10, 0.8), (-10, 0.8), -10),
(dist.binom, (10, 0.8), (10, 1.1), -1),
(dist.poisson, (4.,), (-1.,), -1),
], ids=idfn)
def test_discrete_validate_args(jax_dist, valid_args, invalid_args, invalid_sample):
with validation_enabled():
with pytest.raises(ValueError, match='Invalid parameters'):
jax_dist(*invalid_args)
frozen_dist = jax_dist(*valid_args)
with pytest.raises(ValueError, match='Invalid values'):
frozen_dist.logpmf(invalid_sample)
@pytest.mark.parametrize('jax_dist', [
dist.beta,
dist.cauchy,
dist.expon,
dist.gamma,
dist.halfcauchy,
dist.halfnorm,
dist.lognorm,
dist.norm,
dist.pareto,
dist.t,
dist.trunccauchy,
dist.truncnorm,
dist.uniform,
], ids=idfn)
@pytest.mark.parametrize('loc, scale', [
(1., 1.),
(1., np.array([1., 2.])),
])
def test_sample_gradient(jax_dist, loc, scale):
rng = random.PRNGKey(0)
args = [i + 1 for i in range(jax_dist.numargs)]
expected_shape = lax.broadcast_shapes(*[np.shape(loc), np.shape(scale)])
def fn(args, loc, scale):
return jax_dist.rvs(*args, loc=loc, scale=scale, random_state=rng).sum()
# FIXME: find a proper test for gradients of arg parameters
assert len(grad(fn)(args, loc, scale)) == jax_dist.numargs
assert_allclose(grad(fn, 1)(args, loc, scale),
loc * reduce(mul, expected_shape[:len(expected_shape) - np.ndim(loc)], 1.))
assert_allclose(grad(fn, 2)(args, loc, scale),
jax_dist.rvs(*args, size=expected_shape, random_state=rng))
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.dirichlet, (np.ones(3),)),
(dist.dirichlet, (np.ones((2, 3)),)),
], ids=idfn)
def test_mvsample_gradient(jax_dist, dist_args):
rng = random.PRNGKey(0)
def fn(args):
return jax_dist.rvs(*args, random_state=rng).sum()
# FIXME: find a proper test for gradients of arg parameters
assert len(grad(fn)(dist_args)) == jax_dist.numargs
@pytest.mark.parametrize('jax_dist', [
dist.beta,
dist.cauchy,
dist.expon,
dist.gamma,
dist.halfcauchy,
dist.halfnorm,
dist.lognorm,
dist.norm,
dist.pareto,
dist.t,
dist.trunccauchy,
dist.truncnorm,
dist.uniform,
], ids=idfn)
@pytest.mark.parametrize('loc_scale', [
(),
(1,),
(1, 1),
(1., np.array([1., 2.])),
])
def test_continuous_logpdf(jax_dist, loc_scale):
rng = random.PRNGKey(0)
args = [i + 1 for i in range(jax_dist.numargs)] + list(loc_scale)
samples = jax_dist.rvs(*args, random_state=rng)
if jax_dist is dist.trunccauchy:
sp_dist = osp_stats.cauchy
assert_allclose(jax_dist.logpdf(samples, args[0], args[1]),
sp_dist.logpdf(samples) - np.log(sp_dist.cdf(args[1]) - sp_dist.cdf(args[0])),
atol=1e-6)
else:
sp_dist = getattr(osp_stats, jax_dist.name)
assert_allclose(jax_dist.logpdf(samples, *args), sp_dist.logpdf(samples, *args), atol=1.3e-6)
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.dirichlet, (np.array([1., 2., 3.]),)),
], ids=idfn)
@pytest.mark.parametrize('shape', [
None,
(),
(2,),
(2, 3),
])
def test_multivariate_continuous_logpdf(jax_dist, dist_args, shape):
rng = random.PRNGKey(0)
samples = jax_dist.rvs(*dist_args, size=shape, random_state=rng)
# XXX scipy.stats.dirichlet does not work with batch
if samples.ndim == 1:
sp_dist = getattr(osp_stats, jax_dist.name)
assert_allclose(jax_dist.logpdf(samples, *dist_args),
sp_dist.logpdf(samples, *dist_args), atol=1e-6)
event_dim = len(jax_dist._event_shape(*dist_args))
batch_shape = samples.shape if event_dim == 0 else samples.shape[:-1]
assert jax_dist.logpdf(samples, *dist_args).shape == batch_shape
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.categorical, (np.array([0.7, 0.3]),)),
(dist.multinomial, (10, np.array([0.3, 0.7]),)),
], ids=idfn)
@pytest.mark.parametrize('shape', [
None,
(),
(2,),
(2, 3),
])
def test_multivariate_discrete_logpmf(jax_dist, dist_args, shape):
rng = random.PRNGKey(0)
samples = jax_dist.rvs(*dist_args, size=shape, random_state=rng)
# XXX scipy.stats.multinomial does not work with batch
if samples.ndim == 1:
if jax_dist is dist.categorical:
# test against PyTorch
assert_allclose(jax_dist.logpmf(np.array([1, 0]), *dist_args),
np.array([-1.2040, -0.3567]), atol=1e-4)
else:
sp_dist = getattr(osp_stats, jax_dist.name)
assert_allclose(jax_dist.logpmf(samples, *dist_args),
sp_dist.logpmf(samples, *dist_args), atol=1e-5)
event_dim = len(jax_dist._event_shape(*dist_args))
batch_shape = samples.shape if event_dim == 0 else samples.shape[:-1]
assert jax_dist.logpmf(samples, *dist_args).shape == batch_shape
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.bernoulli, (0.1,)),
(dist.bernoulli, (np.array([0.3, 0.5]),)),
(dist.binom, (10, 0.4)),
(dist.binom, (np.array([10]), np.array([0.4, 0.3]))),
(dist.binom, (np.array([2, 5]), np.array([[0.4], [0.5]]))),
(dist.poisson, (4.,)),
(dist.poisson, (np.array([1., 4., 10.]),)),
], ids=idfn)
@pytest.mark.parametrize('shape', [
None,
(),
(2,),
(2, 3),
])
def test_discrete_logpmf(jax_dist, dist_args, shape):
rng = random.PRNGKey(0)
sp_dist = getattr(osp_stats, jax_dist.name)
samples = jax_dist.rvs(*dist_args, random_state=rng)
assert_allclose(jax_dist.logpmf(samples, *dist_args),
sp_dist.logpmf(onp.asarray(samples), *dist_args),
rtol=1e-5)
if shape is not None:
shape = shape + lax.broadcast_shapes(*[np.shape(arg) for arg in dist_args])
samples = jax_dist.rvs(*dist_args, size=shape, random_state=rng)
assert_allclose(jax_dist.logpmf(samples, *dist_args),
sp_dist.logpmf(onp.asarray(samples), *dist_args),
rtol=1e-5)
def fn(sample, *args):
return np.sum(jax_dist.logpmf(sample, *args))
for i in range(len(dist_args)):
logpmf_grad = grad(fn, i + 1)(samples, *dist_args)
assert np.all(np.isfinite(logpmf_grad))
@pytest.mark.parametrize('jax_dist, dist_args', [
(dist.bernoulli, (0.1,)),
(dist.bernoulli, (np.array([0.3, 0.5]),)),
(dist.binom, (10, 0.4)),
(dist.binom, (np.array([10]), np.array([0.4, 0.3]))),
(dist.binom, (np.array([2, 5]), np.array([[0.4], [0.5]]))),
(dist.categorical, (np.array([0.1, 0.9]),)),
(dist.categorical, (np.array([[0.1, 0.9], [0.2, 0.8]]),)),
(dist.multinomial, (10, np.array([0.1, 0.9]),)),
(dist.multinomial, (10, np.array([[0.1, 0.9], [0.2, 0.8]]),)),
], ids=idfn)
def test_discrete_with_logits(jax_dist, dist_args):
rng = random.PRNGKey(0)
logit_to_prob = np.log if isinstance(jax_dist, jax_multivariate) else logit
logit_args = dist_args[:-1] + (logit_to_prob(dist_args[-1]),)
actual_sample = jax_dist.rvs(*dist_args, random_state=rng)
expected_sample = jax_dist(*logit_args, is_logits=True).rvs(random_state=rng)
assert_allclose(actual_sample, expected_sample)
actual_pmf = jax_dist.logpmf(actual_sample, *dist_args)
expected_pmf = jax_dist(*logit_args, is_logits=True).logpmf(actual_sample)
assert_allclose(actual_pmf, expected_pmf, rtol=1e-6)
| nilq/baby-python | python |
import os
import os.path as osp
import sys
import numpy.random
import torch.nn
from deltalogger.deltalogger import Deltalogger
from reinforce_modules.utils import ConfusionGame, get_defense_visual_fool_model
from utils.train_utils import StateCLEVR, ImageCLEVR_HDF5
sys.path.insert(0, osp.abspath('.'))
import random
import argparse
from modules.embedder import *
import seaborn as sns
from reinforce_modules.policy_networks import Re1nforceTrainer, PolicyNet
sns.set_style('darkgrid')
def _print(something):
print(something, flush=True)
return
def PolicyEvaluation(args, seed=1, logger=None):
torch.manual_seed(seed)
random.seed(seed)
numpy.random.seed(seed)
effective_range_name = 'all'
BS = args.bs
TRAIN_DURATION = args.train_duration
    results_dir = f'./results/experiment_reinforce_increasing_defense_{args.defense_level}'
    if not osp.exists(results_dir):
        os.mkdir(results_dir)
if args.backend == 'states':
load_from = './results/experiment_rn/mos_epoch_164.pt'
else:
load_from = './results/experiment_fp/mos_epoch_219.pt'
model, (
model_fool,
resnet), val_dataloader, predictions_before_pre_calc, initial_example = get_defense_visual_fool_model(
device=args.device,
load_from=load_from,
scenes_path=args.scenes_path,
questions_path=args.questions_path,
clvr_path=args.clvr_path,
batch_size=BS,
defense_level=args.defense_level)
rl_game = ConfusionGame(testbed_model=model,
confusion_model=model_fool,
device='cuda',
batch_size=BS,
confusion_weight=args.confusion_weight,
change_weight=args.change_weight,
fail_weight=args.fail_weight,
invalid_weight=args.invalid_weight,
mode=args.mode,
render=args.mode == 'visual',
backend=args.backend)
if args.backend == 'states':
input_size = 512
elif args.backend == 'pixels':
input_size = 256
else:
raise ValueError(f"Backend must be [states/pixels] you entered: {args.backend}")
model = PolicyNet(input_size=input_size, hidden_size=512, dropout=0.0, reverse_input=True)
trainer = Re1nforceTrainer(model=model,
game=rl_game,
dataloader=val_dataloader,
device=args.device,
lr=args.lr,
train_duration=TRAIN_DURATION,
batch_size=BS,
name=effective_range_name,
predictions_before_pre_calc=predictions_before_pre_calc,
resnet=resnet,
fool_model_name='Defense',
initial_example=initial_example)
best_drop, best_confusion = trainer.train(log_every=-1, save_every=100, logger=logger)
return best_drop, best_confusion
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, help='cpu or cuda', default='cuda')
parser.add_argument('--scenes_path', type=str, help='folder of scenes', default='data/')
parser.add_argument('--questions_path', type=str, help='folder of questions', default='data/')
parser.add_argument('--clvr_path', type=str, help='folder before images', default='data/')
parser.add_argument('--use_cache', type=int, help='if to use cache (only in image clever)', default=0)
parser.add_argument('--use_hdf5', type=int, help='if to use hdf5 loader', default=0)
parser.add_argument('--confusion_weight', type=float, help='what kind of experiment to run', default=1)
parser.add_argument('--change_weight', type=float, help='what kind of experiment to run', default=0.1)
parser.add_argument('--fail_weight', type=float, help='what kind of experiment to run', default=-0.1)
parser.add_argument('--invalid_weight', type=float, help='what kind of experiment to run', default=-0.8)
parser.add_argument('--train_duration', type=int, help='what kind of experiment to run', default=30)
parser.add_argument('--lr', type=float, help='what kind of experiment to run', default=5e-4)
parser.add_argument('--bs', type=int, help='what kind of experiment to run', default=10)
parser.add_argument('--mode', type=str, help='state | visual | imagenet', default='visual')
parser.add_argument('--range', type=float, default=-1)
parser.add_argument('--seed', type=int, default=51)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--backend', type=str, help='states or pixels', default='states')
parser.add_argument('--defense_level', type=int, default=10)
args = parser.parse_args()
if args.backend == 'states':
exp_name = 'DeltaDefense'
elif args.backend == 'pixels':
exp_name = 'DeltaDefensePixels'
else:
raise ValueError(f'Backend has to be one of states/pixels, you entered : {args.backend}')
if args.repeat == 1:
logger = Deltalogger(exp_name, run_tag=[args.defense_level, 0], dummy=True)
_print(PolicyEvaluation(args, args.seed, logger=logger))
else:
acc_drops = []
cons_drops = []
for seed in range(args.seed, args.repeat + args.seed):
experiment_number = seed - args.seed
logger = Deltalogger(exp_name, run_tag=[args.defense_level, experiment_number],
dummy=False)
a, c = PolicyEvaluation(args, seed, logger=logger)
acc_drops.append(a)
cons_drops.append(c)
_print(f'Accuracy: Min: {min(acc_drops)}, Mean: {sum(acc_drops) / len(acc_drops)}, Max: {max(acc_drops)}')
_print(
f'Consistency: Min: {min(cons_drops)}, Mean: {sum(cons_drops) / len(cons_drops)}, Max: {max(cons_drops)}')
| nilq/baby-python | python |
from django.conf import settings
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from api.search.product import views
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r"search", views.ProductDocumentView, basename="product_search")
urlpatterns = [
path("", include(router.urls)),
path("suggest/", views.ProductSuggestDocumentView.as_view(), name="product_suggest"),
path("spire/<str:pk>/comment/", views.CommentView.as_view(), name="retrieve_spire_product_comment"),
path("lite/<uuid:pk>/comment/", views.CommentView.as_view(), name="retrieve_lite_product_comment"),
path("lite/<uuid:pk>/", views.RetrieveLiteProductView.as_view(), name="retrieve_lite_product"),
path("more-like-this/<str:pk>/", views.MoreLikeThisView.as_view(), name="more_like_this"),
path("more-like-this/<uuid:pk>/", views.MoreLikeThisView.as_view(), name="more_like_this"),
]
if settings.ENABLE_SPIRE_SEARCH:
urlpatterns += [
path("spire/<str:pk>/", views.RetrieveSpireProductView.as_view(), name="retrieve_spire_product"),
]
| nilq/baby-python | python |
import copy
import numpy as np
# configure matplotlib for use without xserver
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def get_neuron_features(features, neurons):
"""
Gets neuron activations from activations specified by `neurons`.
Args:
features: numpy arraylike of shape `[n,d]`
neurons: numpy arraylike of shape `[k]` (where k is the number of neuron activations to select)
            used to index neuron activations from `features`. `0 <= neurons[i] < d` for all `i`
Returns:
numpy arraylike of shape `[n,k]`
"""
return np.reshape(features[:,neurons], [len(features), -1])
def mask_neuron_weights(weights, neurons, inplace=False):
"""
Zero masks rows of weights specified by neurons
Args:
weights: numpy array like of shape `[d,num_classes]`
neurons: 1D numpy array of shape `[k]`. `1<=neurons[i]<d` for all `i`
inplace: Boolean specifying whether to mask `weights` in place in addition to returning masked_vals
Returns:
masked_vals: zero masked `weights` with mask specified by `neurons`
"""
mask = np.zeros_like(weights)
mask[neurons,np.arange(mask.shape[-1])] = 1
masked_vals = weights*mask
if inplace:
weights[:] = masked_vals
return masked_vals
def get_masked_model(log_reg_model, top_neurons):
masked_log_reg_model = copy.copy(log_reg_model)
masked_log_reg_model.coef_ = mask_neuron_weights(masked_log_reg_model.coef_.T, top_neurons).T
return masked_log_reg_model
def get_top_k_neuron_weights(weights, k=1):
"""
    Gets the indices of the top weights based on the L1 norm contributions of the weights
based off of https://rakeshchada.github.io/Sentiment-Neuron.html interpretation of
https://arxiv.org/pdf/1704.01444.pdf (Radford et. al)
Args:
weights: numpy arraylike of shape `[d,num_classes]`
k: integer specifying how many rows of weights to select
Returns:
k_indices: numpy arraylike of shape `[k]` specifying indices of the top k rows
"""
weight_penalties = np.squeeze(np.linalg.norm(weights, ord=1, axis=1))
if k == 1:
k_indices = np.array([np.argmax(weight_penalties)])
elif k >= np.log(len(weight_penalties)):
# runs O(nlogn)
k_indices = np.argsort(weight_penalties)[-k:][::-1]
else:
# runs O(n+klogk)
k_indices = np.argpartition(weight_penalties, -k)[-k:]
k_indices = (k_indices[np.argsort(weight_penalties[k_indices])])[::-1]
return k_indices
def plot_logit_and_save(logits, labels, logit_index, name):
"""
    Plots a histogram of the logit at `logit_index`, split by label (negative vs positive),
    and saves the plot to `name`.png.
    Args:
        logits: numpy arraylike of shape `[n, k]` of logit values
        labels: numpy arraylike of shape `[n]`; values >= .5 are treated as positive
        logit_index: integer index of the logit column to plot
        name: output file name, without the '.png' extension
"""
logit = logits[:,logit_index]
plt.title('Distribution of Logit Values')
plt.ylabel('# of logits per bin')
plt.xlabel('Logit Value')
plt.hist(logit[labels < .5], bins=25, alpha=0.5, label='neg')
plt.hist(logit[labels >= .5], bins=25, alpha=0.5, label='pos')
plt.legend()
plt.savefig(name+'.png')
plt.clf()
def plot_weight_contribs_and_save(coef, name):
plt.title('Values of Resulting L1 Penalized Weights')
plt.tick_params(axis='both', which='major')
coef = normalize(coef)
plt.plot(range(len(coef[0])), coef.T)
plt.xlabel('Neuron (Feature) Index')
plt.ylabel('Neuron (Feature) weight')
plt.savefig(name)
plt.clf()
def normalize(coef):
norm = np.linalg.norm(coef)
coef = coef/norm
return coef
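# Illustrative usage sketch (not part of the original module): exercising the helpers above
# on a small, made-up weight matrix.
if __name__ == '__main__':
    _demo_weights = np.array([[3., -1.], [0.1, 0.2], [5., 4.], [-2., 0.5]])  # shape [d=4, num_classes=2]
    # Row L1 norms are [4.0, 0.3, 9.0, 2.5], so the top-2 row indices are [2, 0].
    print(get_top_k_neuron_weights(_demo_weights, k=2))
    _demo_features = np.arange(12.).reshape(3, 4)  # shape [n=3, d=4]
    print(get_neuron_features(_demo_features, np.array([2, 0])))  # selects those 2 columns -> shape [3, 2]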
| nilq/baby-python | python |
'''
Multiples of 3 and 5
'''
total = 0
for i in range(1000):
    if i % 3 == 0 or i % 5 == 0:
        total = total + i
print(total)
| nilq/baby-python | python |
#!/usr/bin/env python
import sys, gym, time
#
# Test yourself as a learning agent! Pass environment name as a command-line argument, for example:
#
# python keyboard_agent.py SpaceInvadersNoFrameskip-v4
#
import gym_game
import pygame
if len(sys.argv) < 3:
print('Usage: python keyboard_agent.py ENV_NAME CONFIG_FILE')
sys.exit(-1)
env_name = sys.argv[1]
print('Making Gym[PyGame] environment:', env_name)
config_file = sys.argv[2]
print('Config file:', config_file)
env = gym.make(env_name, config_file=config_file)
sleep_time = 0.1
if not hasattr(env.action_space, 'n'):
raise Exception('Keyboard agent only supports discrete action spaces')
ACTIONS = env.action_space.n
print("ACTIONS={}".format(ACTIONS))
print("Press keys 1 2 3 ... to take actions 1 2 3 ... etc.")
print("No keys pressed is taking action 0")
render_mode = 'human'
# render_mode = 'rgb_array'
env.use_wall_clock = True
env.reset()
#env.render(render_mode)
def get_action(pressed_keys):
action = None
if pressed_keys[pygame.K_0] == 1:
action = 0
elif pressed_keys[pygame.K_1] == 1:
action = 1
elif pressed_keys[pygame.K_2] == 1:
action = 2
elif pressed_keys[pygame.K_3] == 1:
action = 3
elif pressed_keys[pygame.K_4] == 1:
action = 4
elif pressed_keys[pygame.K_5] == 1:
action = 5
elif pressed_keys[pygame.K_6] == 1:
action = 6
elif pressed_keys[pygame.K_7] == 1:
action = 7
elif pressed_keys[pygame.K_8] == 1:
action = 8
elif pressed_keys[pygame.K_9] == 1:
action = 9
if action is None:
action = 0
return action
def rollout(env):
observation = env.reset()
quit = False
total_reward = 0
total_timesteps = 0
while 1:
# Check for quit from user
events = env.get_events()
for event in events:
if event.type == pygame.QUIT:
quit = True
print('Quit event')
# Get selected action from user
pressed_keys = env.get_keys_pressed()
a = get_action(pressed_keys)
# Update the environment
observation, reward, done, info = env.step(a)
total_timesteps += 1
total_reward += reward
# print('Obs: ',str(observation))
# Render the new state
img = env.render(mode=render_mode, close=quit) # Render the game
# Handle quit request
if quit:
print('Quitting (truncating rollout)...')
break
if done:
print('Episode (rollout) complete.')
env.reset()
break
# Wait a short time
time.sleep(sleep_time)
print("Rollout summary: Timesteps %i Reward %0.2f" % (total_timesteps, total_reward))
return quit
while 1:
quit = rollout(env)
if quit:
break
| nilq/baby-python | python |
import enum
import re
import string
from typing import Text, List
from xml.sax import saxutils
import emoji
from six import string_types
from collections.abc import Iterable
from tklearn.preprocessing import TextPreprocessor
__all__ = [
'Normalize',
'TweetPreprocessor',
]
@enum.unique
class Normalize(enum.Enum):
NONE = 0
ALL = 1
LINKS = 2
HASHTAGS = 3
MENTIONS = 4
IMAGES = 5
class TweetPreprocessor(TextPreprocessor):
""" Preprocessor for Tweets.
    An instance of this class can be used to create a preprocessor for your tweet data.
    Several options are provided, which you can combine according to your use case.
"""
RE_LINKS = re.compile(r'(https?://\S+)')
RE_IMAGE_LINKS = re.compile(r'(pic.twitter.com\S+)')
RE_MENTIONS = re.compile(r'(@[a-zA-Z0-9_]{1,15})')
RE_HASHTAGS = re.compile(r'(#\w+)')
def __init__(self, normalize=Normalize.NONE, lowercase=False, **kwargs):
""" Initialize `TweetPreprocessor` object.
Parameters
----------
kwargs
Parameters
"""
super(TweetPreprocessor, self).__init__()
self.normalize = []
self.lowercase = lowercase
if normalize == Normalize.ALL:
self.normalize = [
Normalize.LINKS,
Normalize.HASHTAGS,
Normalize.MENTIONS,
Normalize.IMAGES,
]
elif (normalize != Normalize.NONE) and isinstance(normalize, Iterable):
for item in normalize:
if isinstance(item, string_types):
if not item.endswith('s'):
item = '{}s'.format(item)
item = Normalize[item.upper()]
self.normalize.append(item)
@staticmethod
def _replace(s: List[Text], old: Text, new: Text) -> List[Text]:
return [new if x == old else x for x in s if x.strip() != '']
def preprocess(self, s: Text) -> Text:
""" Preprocess the input text. Expected input is a Tweet text.
Parameters
----------
s
Input Tweet text.
Returns
-------
Preprocessed tweet.
"""
s = self._clean_tweet(s)
if Normalize.LINKS in self.normalize:
s = self.RE_LINKS.sub('<link>', s)
if Normalize.IMAGES in self.normalize:
s = self.RE_IMAGE_LINKS.sub('<image>', s)
if Normalize.HASHTAGS in self.normalize:
s = self.RE_HASHTAGS.sub('<hashtag>', s)
if Normalize.MENTIONS in self.normalize:
s = self.RE_MENTIONS.sub('<mention>', s)
tokens = s.split()
for ns in self.normalize:
if isinstance(ns, str):
pass
elif isinstance(ns, tuple):
assert len(ns) == 2, \
'Required a tuple of size 2 indicating (new_word, old_words) values for the normalization.'
assert isinstance(ns[1], list), \
'Required a list of old values to replace with the new value.'
for n in ns[1]:
tokens = self._replace(tokens, n, ns[0])
if self.lowercase:
return ' '.join(tokens).lower()
else:
return ' '.join(tokens)
@staticmethod
def _clean_tweet(x):
""" Cleans a given text (tweet) while keeping important characters.
Parameters
----------
x
Input String.
Returns
-------
Cleaned Text.
"""
x = saxutils.unescape(x)
x = x.replace('\xa0', ' ')
x = emoji.demojize(x)
x = ''.join(filter(lambda item: item in set(string.printable), x))
x = emoji.emojize(x)
return x
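
# Illustrative usage sketch (not part of the original module). It assumes the optional
# dependencies imported above (emoji, six, tklearn) are installed; the output shown in the
# trailing comment is indicative only.
if __name__ == '__main__':
    _demo = TweetPreprocessor(normalize=[Normalize.LINKS, Normalize.MENTIONS], lowercase=True)
    print(_demo.preprocess('Hey @user check https://example.com #news'))
    # -> 'hey <mention> check <link> #news'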
| nilq/baby-python | python |
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
import math
import pickle
app = Flask(__name__)
CORS(app)
uniq_fire_date = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
uniq_county = ['No Data', 'Skamania', 'Cowlitz', 'Thurston', 'Okanogan', 'Pacific', 'Clark', 'Columbia', 'Grays Harbor', 'Adams', 'Benton', 'Asotin', 'Stevens', 'Chelan', 'Klickitat', 'King', 'Lewis', 'Douglas', 'Franklin', 'Jefferson', 'San Juan', 'Kittitas', 'Garfield', 'Grant', 'Pierce', 'Wahkiakum', 'Ferry', 'Clallam', 'Spokane', 'Mason', 'Skagit', 'Pend Oreille', 'Walla Walla', 'Whatcom', 'Kitsap', 'Lincoln', 'Island', 'Snohomish', 'Yakima', 'Whitman']
uniq_cause = ['Smoker', 'Miscellaneou', 'Under Invest', 'Logging', 'Debris Burn', 'Undetermined', 'Recreation', 'Railroad', 'Lightning', 'Children', 'Arson', 'None']
uniq_binlat = [1, 2, 3, 4]
uniq_binlon = [1, 2, 3, 4, 5, 6, 7, 8]
uniq_binacres = [2, 3, 4, 5, 6, 7, 8, 9]
# def binLat(lat):
# print(lat)
# if lat > 48:
# return 1
# elif 48 >= lat > 47:
# return 2
# elif 47 >= lat > 46:
# return 3
# elif 46 >= lat > 45:
# return 4
# else:
# return 5
# def binLon(lon):
# if lon < -124:
# return 1
# elif -124 <= lon < -123:
# return 2
# elif -123 <= lon < -122:
# return 3
# elif -122 <= lon < -121:
# return 4
# elif -121 <= lon < -120:
# return 5
# elif -120 <= lon < -119:
# return 6
# elif -119 <= lon < -118:
# return 7
# else:
# return 8
def unBinAcres(acres_binned):
    # Map a binned class label back to its human-readable acreage range.
    labels = {
        1: "0-2",
        2: "2-10",
        3: "10-50",
        4: "50-100",
        5: "100-500",
        6: "500-2000",
        7: "2000-10000",
        8: "10000-50000",
        9: "50000-300000",
    }
    return labels.get(acres_binned, "Failure to Compute...")
def acres_to_circle_radius_in_miles(acres):
sqft = acres * 43560
radius = math.sqrt(sqft / math.pi)
return radius / 5280
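# Worked example (illustrative): 10,000 acres is 435,600,000 sq ft, so the equivalent circular
# area has radius sqrt(435600000 / pi) ~= 11,775 ft, i.e.
# acres_to_circle_radius_in_miles(10000) returns roughly 2.23 (miles).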
@app.route('/', methods=['GET'])
def main_route():
return render_template('index.html',
mth=uniq_fire_date,
cnt=uniq_county,
cau=uniq_cause,
lat=uniq_binlat,
lon=uniq_binlon,
acr=uniq_binacres)
@app.route('/api/predict', methods=["GET"])
def return_prediction():
acres = 10000
cause = request.args.get("cause", "")
county = request.args.get("county", "")
fire_date = request.args.get("month", "")
lat = request.args.get("binlat", "")
lon = request.args.get("binlon", "")
# lat = binLat(float(lat))
# lon = binLon(float(lon))
instance = [fire_date, county, cause, lat, lon]
    with open("trees.p", "rb") as infile:
        best_trees = pickle.load(infile)
prediction = predict_acres([instance], best_trees)
print(prediction)
if prediction is not None:
acres_binned = prediction[0]
result = {"prediction": unBinAcres(acres_binned)}
return jsonify(result), 200
else:
# failure!!
return "Error making prediction", 400
def predict_acres(X_test, best_trees):
header = []
predictions = []
for i in range(0, len(X_test[0])):
header.append("att" + str(i))
for instance in X_test:
tree_predictions = {}
for tree in best_trees:
temp = tree['tree']
prediction = tdidt_predict(header, tree['tree'], instance)
if prediction in tree_predictions:
tree_predictions[prediction] += 1
else:
tree_predictions[prediction] = 1
max_key = max(tree_predictions, key = tree_predictions.get)
predictions.append(max_key)
return predictions
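# Note (inferred from the code above, not stated in the original): best_trees is expected to be a
# list of dicts, each holding a nested-list decision tree under the 'tree' key; predict_acres()
# takes a majority vote across those trees for every instance in X_test.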
def tdidt_predict(header, tree, instance):
info_type = tree[0]
if info_type == "Attribute":
attribute_index = header.index(tree[1])
instance_value = instance[attribute_index]
# now I need to find which "edge" to follow recursively
for i in range(2, len(tree)):
value_list = tree[i]
if value_list[1] == instance_value:
# we have a match!! recurse!!
return tdidt_predict(header, value_list[2], instance)
else: # "Leaf"
return tree[1] # leaf class label
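# Illustrative sketch (not part of the original app) of the nested-list tree format that
# tdidt_predict() expects, inferred from the traversal above: an attribute node looks like
# ["Attribute", att_name, ["Value", value, subtree], ...] and a leaf looks like ["Leaf", label, ...].
# _example_tree = ["Attribute", "att0",
#                  ["Value", "Jan", ["Leaf", 3]],
#                  ["Value", "Aug", ["Attribute", "att2",
#                                    ["Value", "Lightning", ["Leaf", 7]]]]]
# tdidt_predict(["att%d" % i for i in range(5)], _example_tree,
#               ["Aug", "Okanogan", "Lightning", 2, 6])  # -> 7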
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8888) | nilq/baby-python | python |
def mallow(y, y_pred, y_sub, k, p):
"""
    Return the Mallows Cp score for a model.
    Input:
    y: array-like of shape = (n_samples) including values of observed y
    y_pred: vector including values of predicted y from the full model
    y_sub: vector including values of predicted y from the sub model
    k: int, number of predictive variable(s) used in the full model
    p: int, number of predictive variable(s) used in the sub model
    Output:
    mallow_score: int or float, Mallows Cp score of the model and sub model
    Raises ValueError if k is less than p.
    Raises ValueError if y, y_sub and y_pred are not of the same length.
    Raises ValueError if length(y) <= 1, length(y_sub) <= 1, or length(y_pred) <= 1.
    Raises TypeError if y, y_sub or y_pred is not array-like, or contains non-numeric elements.
    Raises TypeError if p or k is not an int.
    Raises Exception if p <= 0 or k <= 0.
"""
import numpy as np
import pandas as pd
    if k < p:
        raise ValueError("The number of predictive variable(s) used in the model must be at least the number used in the subset model")
if len(y)!=len(y_sub) or len(y_sub)!=len(y_pred) or len(y)!= len(y_pred):
raise ValueError("The length of observed y, predicted y, and predicted y in subset model must be same")
    if len(y) <= 1 or len(y_sub) <= 1 or len(y_pred) <= 1:
raise ValueError("The length of observed y, predicted y, and predicted y in subset model must be larger than 1")
    if not all(isinstance(v, (np.ndarray, list, tuple, pd.core.series.Series)) for v in (y, y_pred, y_sub)):
raise TypeError("The observed y, predicted y, and predicted y in subset model must be array-like shape (e.g. array, list, tuple, data column)")
else:
for i in y:
for j in y_pred:
                if not isinstance(i, (int, float)) or not isinstance(j, (int, float)):
raise TypeError("The observed y, predicted y, and predicted y in subset model must be numeric elements")
    if not isinstance(p, int) or not isinstance(k, int):
        raise TypeError("The numbers of predictive variable(s) used in the model and sub model must be integers")
    if p <= 0 or k <= 0:
        raise Exception("The numbers of predictive variable(s) used in the model and sub model must be positive")
    if isinstance(y, list):
        y = np.array(y)
    if isinstance(y_sub, list):
        y_sub = np.array(y_sub)
    if isinstance(y_pred, list):
        y_pred = np.array(y_pred)
    SSE_p = np.sum((y - y_sub) ** 2)
    MSE = np.sum((y - y_pred) ** 2) / (len(y) - k)
    mallowcp = SSE_p / MSE - len(y) + 2 * p
return mallowcp
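
# Illustrative usage sketch (not part of the original module); toy numbers only.
if __name__ == "__main__":
    import numpy as np
    y_obs = np.array([1.0, 2.0, 3.0, 4.0, 5.0])      # observed values
    y_full = np.array([1.1, 1.9, 3.2, 3.8, 5.1])     # full-model predictions (k = 3 predictors)
    y_small = np.array([1.3, 1.8, 3.4, 3.6, 5.3])    # sub-model predictions (p = 2 predictors)
    print(mallow(y_obs, y_full, y_small, k=3, p=2))  # -> about 8.82 for these toy numbers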
| nilq/baby-python | python |