code | apis | extract_api
---|---|---
from django.contrib import admin
from .models import SearchResult
# Register your models here.
class SearchResultAdmin(admin.ModelAdmin):
fields = ["query", "heading", "url", "text"]
admin.site.register(SearchResult, SearchResultAdmin) | [
"django.contrib.admin.site.register"
] | [((189, 241), 'django.contrib.admin.site.register', 'admin.site.register', (['SearchResult', 'SearchResultAdmin'], {}), '(SearchResult, SearchResultAdmin)\n', (208, 241), False, 'from django.contrib import admin\n')] |
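Each row above pairs a source snippet ("code") with the fully qualified API calls found in it ("apis"); the "extract_api" column appears to record, for every call, its character offsets, argument text and the originating import. As a rough illustration of the idea (not the dataset's actual extraction tooling), a minimal sketch built on Python's ast module could recover the dotted call names like so:

import ast

def list_call_names(source):
    """Best-effort sketch: collect dotted names of the calls appearing in `source`."""
    def dotted(node):
        # Rebuild "a.b.c" from nested Attribute/Name nodes; skip anything more exotic.
        if isinstance(node, ast.Name):
            return node.id
        if isinstance(node, ast.Attribute):
            base = dotted(node.value)
            return base + "." + node.attr if base else None
        return None

    calls = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                calls.append(name)
    return calls

# list_call_names('admin.site.register(SearchResult, SearchResultAdmin)')
# -> ['admin.site.register']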
import asyncio
import os
import tempfile
from contextlib import ExitStack
from typing import Text, Optional, List, Union, Dict
from rasa.importers.importer import TrainingDataImporter
from rasa import model
from rasa.model import FingerprintComparisonResult
from rasa.core.domain import Domain
from rasa.utils.common import TempDirectoryPath
from rasa.cli.utils import (
print_success,
print_warning,
print_error,
bcolors,
print_color,
)
from rasa.constants import DEFAULT_MODELS_PATH, DEFAULT_CORE_SUBDIRECTORY_NAME
def train(
domain: Text,
config: Text,
training_files: Union[Text, List[Text]],
output: Text = DEFAULT_MODELS_PATH,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> Optional[Text]:
if loop is None:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(
train_async(
domain=domain,
config=config,
training_files=training_files,
output_path=output,
force_training=force_training,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
additional_arguments=additional_arguments,
)
)
async def train_async(
domain: Union[Domain, Text],
config: Dict[Text, Text],
training_files: Optional[Union[Text, List[Text]]],
output_path: Text = DEFAULT_MODELS_PATH,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
"""Trains a Rasa model (Core and NLU).
Args:
domain: Path to the domain file.
        config: Dict of paths to the config for Core and NLU. Keys are language codes.
training_files: Paths to the training data for Core and NLU.
output_path: Output path.
force_training: If `True` retrain model even if data has not changed.
fixed_model_name: Name of model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
additional_arguments: Additional training parameters.
Returns:
Path of the trained model archive.
"""
# file_importer = TrainingDataImporter.load_from_config(
# config, domain, training_files
# )
with ExitStack() as stack:
train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
# bf mod
from rasa_addons.importers import BotfrontFileImporter
file_importer = BotfrontFileImporter(config, domain, training_files)
# domain = await file_importer.get_domain()
# if domain.is_empty():
# return await handle_domain_if_not_exists(
# file_importer, output_path, fixed_model_name
# )
# /bf mod
return await _train_async_internal(
file_importer,
train_path,
output_path,
force_training,
fixed_model_name,
persist_nlu_training_data,
additional_arguments,
)
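    # --- Illustrative usage sketch (added for documentation; not part of the original
    # Botfront/Rasa module). The file paths are hypothetical placeholders; per the
    # docstring above, this fork expects `config` to be keyed by language code.
def _example_train_call():
    return train(
        domain="domain.yml",
        config={"en": "config-en.yml"},
        training_files=["data/"],
        output=DEFAULT_MODELS_PATH,
        force_training=False,
    )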
async def handle_domain_if_not_exists(
file_importer: TrainingDataImporter, output_path, fixed_model_name
):
nlu_model_only = await _train_nlu_with_validated_data(
file_importer, output=output_path, fixed_model_name=fixed_model_name
)
    print_warning(
        "Core training was skipped because no valid domain file was found. "
        "Only an NLU model was created. Please specify a valid domain using "
        "the '--domain' argument or check if the provided domain file exists."
    )
return nlu_model_only
async def _train_async_internal(
file_importer: TrainingDataImporter,
train_path: Text,
output_path: Text,
force_training: bool,
fixed_model_name: Optional[Text],
persist_nlu_training_data: bool,
additional_arguments: Optional[Dict],
) -> Optional[Text]:
"""Trains a Rasa model (Core and NLU). Use only from `train_async`.
Args:
file_importer: `TrainingDataImporter` which supplies the training data.
train_path: Directory in which to train the model.
output_path: Output path.
force_training: If `True` retrain model even if data has not changed.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
fixed_model_name: Name of model to be stored.
additional_arguments: Additional training parameters.
Returns:
Path of the trained model archive.
"""
stories, nlu_data = await asyncio.gather(
file_importer.get_stories(), file_importer.get_nlu_data()
)
# if stories.is_empty() and nlu_data.is_empty():
# print_error(
# "No training data given. Please provide stories and NLU data in "
# "order to train a Rasa model using the '--data' argument."
# )
# return
# if nlu_data.is_empty():
# print_warning("No NLU data present. Just a Rasa Core model will be trained.")
# return await _train_core_with_validated_data(
# file_importer,
# output=output_path,
# fixed_model_name=fixed_model_name,
# additional_arguments=additional_arguments,
# )
new_fingerprint = await model.model_fingerprint(file_importer)
old_model = model.get_latest_model(output_path)
fingerprint_comparison = FingerprintComparisonResult(force_training=force_training)
if not force_training:
fingerprint_comparison = model.should_retrain(
new_fingerprint, old_model, train_path
)
# bf mod >
if fingerprint_comparison.nlu == True: # replace True with list of all langs
fingerprint_comparison.nlu = list(new_fingerprint.get("nlu-config", {}).keys())
domain = await file_importer.get_domain()
core_untrainable = domain.is_empty() or stories.is_empty()
nlu_untrainable = [l for l, d in nlu_data.items() if d.is_empty()]
fingerprint_comparison.core = fingerprint_comparison.core and not core_untrainable
fingerprint_comparison.nlu = [l for l in fingerprint_comparison.nlu if l not in nlu_untrainable]
if core_untrainable:
print_color("Skipping Core training since domain or stories are empty.", color=bcolors.OKBLUE)
for lang in nlu_untrainable:
print_color("No NLU data found for language <{}>, skipping training...".format(lang), color=bcolors.OKBLUE)
# </ bf mod
if fingerprint_comparison.is_training_required():
await _do_training(
file_importer,
output_path=output_path,
train_path=train_path,
fingerprint_comparison_result=fingerprint_comparison,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
additional_arguments=additional_arguments,
)
return model.package_model(
fingerprint=new_fingerprint,
output_directory=output_path,
train_path=train_path,
fixed_model_name=fixed_model_name,
)
print_success(
"Nothing changed. You can use the old model stored at '{}'."
"".format(os.path.abspath(old_model))
)
return old_model
async def _do_training(
file_importer: TrainingDataImporter,
output_path: Text,
train_path: Text,
fingerprint_comparison_result: Optional[FingerprintComparisonResult] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
):
if not fingerprint_comparison_result:
fingerprint_comparison_result = FingerprintComparisonResult()
if fingerprint_comparison_result.should_retrain_core():
await _train_core_with_validated_data(
file_importer,
output=output_path,
train_path=train_path,
fixed_model_name=fixed_model_name,
additional_arguments=additional_arguments,
)
elif fingerprint_comparison_result.should_retrain_nlg():
print_color(
"Core stories/configuration did not change. "
"Only the templates section has been changed. A new model with "
"the updated templates will be created.",
color=bcolors.OKBLUE,
)
await model.update_model_with_new_domain(file_importer, train_path)
else:
print_color(
"Core stories/configuration did not change. No need to retrain Core model.",
color=bcolors.OKBLUE,
)
if fingerprint_comparison_result.should_retrain_nlu():
await _train_nlu_with_validated_data(
file_importer,
output=output_path,
train_path=train_path,
fixed_model_name=fixed_model_name,
retrain_nlu=fingerprint_comparison_result.nlu,
persist_nlu_training_data=persist_nlu_training_data,
)
else:
print_color(
"NLU data/configuration did not change. No need to retrain NLU model.",
color=bcolors.OKBLUE,
)
def train_core(
domain: Union[Domain, Text],
config: Text,
stories: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
loop = asyncio.get_event_loop()
return loop.run_until_complete(
train_core_async(
domain=domain,
config=config,
stories=stories,
output=output,
train_path=train_path,
fixed_model_name=fixed_model_name,
additional_arguments=additional_arguments,
)
)
async def train_core_async(
domain: Union[Domain, Text],
config: Text,
stories: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
"""Trains a Core model.
Args:
domain: Path to the domain file.
config: Path to the config file for Core.
stories: Path to the Core training data.
output: Output path.
train_path: If `None` the model will be trained in a temporary
directory, otherwise in the provided directory.
fixed_model_name: Name of model to be stored.
uncompress: If `True` the model will not be compressed.
additional_arguments: Additional training parameters.
Returns:
        If `train_path` is given it returns the path to the directory with the
        trained model files, otherwise the path to the packaged model archive.
"""
file_importer = TrainingDataImporter.load_core_importer_from_config(
config, domain, [stories]
)
domain = await file_importer.get_domain()
if domain.is_empty():
print_error(
"Core training was skipped because no valid domain file was found. "
"Please specify a valid domain using '--domain' argument or check if the provided domain file exists."
)
return None
if not await file_importer.get_stories():
print_error(
"No stories given. Please provide stories in order to "
"train a Rasa Core model using the '--stories' argument."
)
return
return await _train_core_with_validated_data(
file_importer,
output=output,
train_path=train_path,
fixed_model_name=fixed_model_name,
additional_arguments=additional_arguments,
)
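# --- Illustrative sketch (not part of the original module): training Core on its own
# through the synchronous wrapper above. Paths are hypothetical; since no `train_path`
# is supplied here, the call is expected to return a packaged model archive path.
def _example_train_core_call():
    return train_core(
        domain="domain.yml",
        config="config.yml",
        stories="data/stories.md",
        output="models/",
    )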
async def _train_core_with_validated_data(
file_importer: TrainingDataImporter,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
additional_arguments: Optional[Dict] = None,
) -> Optional[Text]:
"""Train Core with validated training and config data."""
import rasa.core.train
with ExitStack() as stack:
if train_path:
# If the train path was provided, do nothing on exit.
_train_path = train_path
else:
# Otherwise, create a temp train path and clean it up on exit.
_train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
# normal (not compare) training
print_color("Training Core model...", color=bcolors.OKBLUE)
domain, config = await asyncio.gather(
file_importer.get_domain(), file_importer.get_config()
)
await rasa.core.train(
domain_file=domain,
training_resource=file_importer,
output_path=os.path.join(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME),
policy_config=config,
additional_arguments=additional_arguments,
)
print_color("Core model training completed.", color=bcolors.OKBLUE)
if train_path is None:
# Only Core was trained.
new_fingerprint = await model.model_fingerprint(file_importer)
return model.package_model(
fingerprint=new_fingerprint,
output_directory=output,
train_path=_train_path,
fixed_model_name=fixed_model_name,
model_prefix="core-",
)
return _train_path
def train_nlu(
config: Text,
nlu_data: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
) -> Optional[Text]:
"""Trains an NLU model.
Args:
config: Path to the config file for NLU.
nlu_data: Path to the NLU training data.
output: Output path.
train_path: If `None` the model will be trained in a temporary
directory, otherwise in the provided directory.
fixed_model_name: Name of the model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
Returns:
        If `train_path` is given it returns the path to the directory with the
        trained model files, otherwise the path to the packaged model archive.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(
_train_nlu_async(
config,
nlu_data,
output,
train_path,
fixed_model_name,
persist_nlu_training_data,
)
)
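# --- Illustrative sketch (not part of the original module): training only the NLU part
# through the synchronous wrapper above. Paths are hypothetical placeholders.
def _example_train_nlu_call():
    return train_nlu(
        config="config.yml",
        nlu_data="data/nlu.md",
        output="models/",
        persist_nlu_training_data=True,
    )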
async def _train_nlu_async(
config: Text,
nlu_data: Text,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
):
if not nlu_data:
print_error(
"No NLU data given. Please provide NLU data in order to train "
"a Rasa NLU model using the '--nlu' argument."
)
return
    # Training NLU only, hence the training files still have to be selected.
file_importer = TrainingDataImporter.load_nlu_importer_from_config(
config, training_data_paths=[nlu_data]
)
training_datas = await file_importer.get_nlu_data()
if training_datas.is_empty():
print_error(
f"Path '{nlu_data}' doesn't contain valid NLU data in it. "
"Please verify the data format. "
"The NLU model training will be skipped now."
)
return
return await _train_nlu_with_validated_data(
file_importer,
output=output,
train_path=train_path,
fixed_model_name=fixed_model_name,
persist_nlu_training_data=persist_nlu_training_data,
)
async def _train_nlu_with_validated_data(
file_importer: TrainingDataImporter,
output: Text,
train_path: Optional[Text] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
retrain_nlu: Union[bool, List[Text]] = True
) -> Optional[Text]:
"""Train NLU with validated training and config data."""
import rasa.nlu.train
with ExitStack() as stack:
models = {}
from rasa.nlu import config as cfg_loader
if train_path:
# If the train path was provided, do nothing on exit.
_train_path = train_path
else:
# Otherwise, create a temp train path and clean it up on exit.
_train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
# bf mod
config = await file_importer.get_nlu_config(retrain_nlu)
for lang in config:
if config[lang]:
print_color("Start training {} NLU model ...".format(lang), color=bcolors.OKBLUE)
_, models[lang], _ = await rasa.nlu.train(
config[lang],
file_importer,
_train_path,
fixed_model_name="nlu-{}".format(lang),
persist_nlu_training_data=persist_nlu_training_data,
)
else:
print_color("NLU data for language <{}> didn't change, skipping training...".format(lang), color=bcolors.OKBLUE)
# /bf mod
print_color("NLU model training completed.", color=bcolors.OKBLUE)
if train_path is None:
# Only NLU was trained
new_fingerprint = await model.model_fingerprint(file_importer)
return model.package_model(
fingerprint=new_fingerprint,
output_directory=output,
train_path=_train_path,
fixed_model_name=fixed_model_name,
model_prefix="nlu-",
)
return _train_path
| [
"rasa.model.FingerprintComparisonResult",
"rasa_addons.importers.BotfrontFileImporter",
"rasa.model.update_model_with_new_domain",
"rasa.model.should_retrain",
"asyncio.new_event_loop",
"rasa.cli.utils.print_color",
"rasa.cli.utils.print_warning",
"rasa.importers.importer.TrainingDataImporter.load_nlu_importer_from_config",
"contextlib.ExitStack",
"rasa.model.package_model",
"asyncio.get_event_loop",
"tempfile.mkdtemp",
"rasa.cli.utils.print_error",
"rasa.importers.importer.TrainingDataImporter.load_core_importer_from_config",
"rasa.model.model_fingerprint",
"os.path.join",
"rasa.model.get_latest_model",
"os.path.abspath",
"asyncio.set_event_loop"
] | [((3729, 3952), 'rasa.cli.utils.print_warning', 'print_warning', (['"""Core training was skipped because no valid domain file was found. Only an nlu-model was created.Please specify a valid domain using \'--domain\' argument or check if the provided domain file exists."""'], {}), '(\n "Core training was skipped because no valid domain file was found. Only an nlu-model was created.Please specify a valid domain using \'--domain\' argument or check if the provided domain file exists."\n )\n', (3742, 3952), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((5750, 5785), 'rasa.model.get_latest_model', 'model.get_latest_model', (['output_path'], {}), '(output_path)\n', (5772, 5785), False, 'from rasa import model\n'), ((5815, 5873), 'rasa.model.FingerprintComparisonResult', 'FingerprintComparisonResult', ([], {'force_training': 'force_training'}), '(force_training=force_training)\n', (5842, 5873), False, 'from rasa.model import FingerprintComparisonResult\n'), ((9790, 9814), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9812, 9814), False, 'import asyncio\n'), ((11124, 11202), 'rasa.importers.importer.TrainingDataImporter.load_core_importer_from_config', 'TrainingDataImporter.load_core_importer_from_config', (['config', 'domain', '[stories]'], {}), '(config, domain, [stories])\n', (11175, 11202), False, 'from rasa.importers.importer import TrainingDataImporter\n'), ((14616, 14640), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14638, 14640), False, 'import asyncio\n'), ((15390, 15484), 'rasa.importers.importer.TrainingDataImporter.load_nlu_importer_from_config', 'TrainingDataImporter.load_nlu_importer_from_config', (['config'], {'training_data_paths': '[nlu_data]'}), '(config,\n training_data_paths=[nlu_data])\n', (15440, 15484), False, 'from rasa.importers.importer import TrainingDataImporter\n'), ((2709, 2720), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (2718, 2720), False, 'from contextlib import ExitStack\n'), ((2916, 2968), 'rasa_addons.importers.BotfrontFileImporter', 'BotfrontFileImporter', (['config', 'domain', 'training_files'], {}), '(config, domain, training_files)\n', (2936, 2968), False, 'from rasa_addons.importers import BotfrontFileImporter\n'), ((5695, 5733), 'rasa.model.model_fingerprint', 'model.model_fingerprint', (['file_importer'], {}), '(file_importer)\n', (5718, 5733), False, 'from rasa import model\n'), ((5934, 5994), 'rasa.model.should_retrain', 'model.should_retrain', (['new_fingerprint', 'old_model', 'train_path'], {}), '(new_fingerprint, old_model, train_path)\n', (5954, 5994), False, 'from rasa import model\n'), ((6604, 6702), 'rasa.cli.utils.print_color', 'print_color', (['"""Skipping Core training since domain or stories are empty."""'], {'color': 'bcolors.OKBLUE'}), "('Skipping Core training since domain or stories are empty.',\n color=bcolors.OKBLUE)\n", (6615, 6702), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((7305, 7446), 'rasa.model.package_model', 'model.package_model', ([], {'fingerprint': 'new_fingerprint', 'output_directory': 'output_path', 'train_path': 'train_path', 'fixed_model_name': 'fixed_model_name'}), '(fingerprint=new_fingerprint, output_directory=\n output_path, train_path=train_path, fixed_model_name=fixed_model_name)\n', (7324, 7446), False, 'from rasa import model\n'), ((8080, 8109), 'rasa.model.FingerprintComparisonResult', 'FingerprintComparisonResult', ([], 
{}), '()\n', (8107, 8109), False, 'from rasa.model import FingerprintComparisonResult\n'), ((9378, 9492), 'rasa.cli.utils.print_color', 'print_color', (['"""NLU data/configuration did not change. No need to retrain NLU model."""'], {'color': 'bcolors.OKBLUE'}), "(\n 'NLU data/configuration did not change. No need to retrain NLU model.',\n color=bcolors.OKBLUE)\n", (9389, 9492), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((11297, 11488), 'rasa.cli.utils.print_error', 'print_error', (['"""Core training was skipped because no valid domain file was found. Please specify a valid domain using \'--domain\' argument or check if the provided domain file exists."""'], {}), '(\n "Core training was skipped because no valid domain file was found. Please specify a valid domain using \'--domain\' argument or check if the provided domain file exists."\n )\n', (11308, 11488), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((11591, 11724), 'rasa.cli.utils.print_error', 'print_error', (['"""No stories given. Please provide stories in order to train a Rasa Core model using the \'--stories\' argument."""'], {}), '(\n "No stories given. Please provide stories in order to train a Rasa Core model using the \'--stories\' argument."\n )\n', (11602, 11724), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((12353, 12364), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (12362, 12364), False, 'from contextlib import ExitStack\n'), ((12724, 12783), 'rasa.cli.utils.print_color', 'print_color', (['"""Training Core model..."""'], {'color': 'bcolors.OKBLUE'}), "('Training Core model...', color=bcolors.OKBLUE)\n", (12735, 12783), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((13206, 13273), 'rasa.cli.utils.print_color', 'print_color', (['"""Core model training completed."""'], {'color': 'bcolors.OKBLUE'}), "('Core model training completed.', color=bcolors.OKBLUE)\n", (13217, 13273), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((15121, 15251), 'rasa.cli.utils.print_error', 'print_error', (['"""No NLU data given. Please provide NLU data in order to train a Rasa NLU model using the \'--nlu\' argument."""'], {}), '(\n "No NLU data given. Please provide NLU data in order to train a Rasa NLU model using the \'--nlu\' argument."\n )\n', (15132, 15251), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((15594, 15750), 'rasa.cli.utils.print_error', 'print_error', (['f"""Path \'{nlu_data}\' doesn\'t contain valid NLU data in it. Please verify the data format. The NLU model training will be skipped now."""'], {}), '(\n f"Path \'{nlu_data}\' doesn\'t contain valid NLU data in it. Please verify the data format. 
The NLU model training will be skipped now."\n )\n', (15605, 15750), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((16444, 16455), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (16453, 16455), False, 'from contextlib import ExitStack\n'), ((17559, 17625), 'rasa.cli.utils.print_color', 'print_color', (['"""NLU model training completed."""'], {'color': 'bcolors.OKBLUE'}), "('NLU model training completed.', color=bcolors.OKBLUE)\n", (17570, 17625), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((974, 998), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (996, 998), False, 'import asyncio\n'), ((7608, 7634), 'os.path.abspath', 'os.path.abspath', (['old_model'], {}), '(old_model)\n', (7623, 7634), False, 'import os\n'), ((8493, 8683), 'rasa.cli.utils.print_color', 'print_color', (['"""Core stories/configuration did not change. Only the templates section has been changed. A new model with the updated templates will be created."""'], {'color': 'bcolors.OKBLUE'}), "(\n 'Core stories/configuration did not change. Only the templates section has been changed. A new model with the updated templates will be created.'\n , color=bcolors.OKBLUE)\n", (8504, 8683), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((8833, 8953), 'rasa.cli.utils.print_color', 'print_color', (['"""Core stories/configuration did not change. No need to retrain Core model."""'], {'color': 'bcolors.OKBLUE'}), "(\n 'Core stories/configuration did not change. No need to retrain Core model.'\n , color=bcolors.OKBLUE)\n", (8844, 8953), False, 'from rasa.cli.utils import print_success, print_warning, print_error, bcolors, print_color\n'), ((13437, 13600), 'rasa.model.package_model', 'model.package_model', ([], {'fingerprint': 'new_fingerprint', 'output_directory': 'output', 'train_path': '_train_path', 'fixed_model_name': 'fixed_model_name', 'model_prefix': '"""core-"""'}), "(fingerprint=new_fingerprint, output_directory=output,\n train_path=_train_path, fixed_model_name=fixed_model_name, model_prefix\n ='core-')\n", (13456, 13600), False, 'from rasa import model\n'), ((17788, 17950), 'rasa.model.package_model', 'model.package_model', ([], {'fingerprint': 'new_fingerprint', 'output_directory': 'output', 'train_path': '_train_path', 'fixed_model_name': 'fixed_model_name', 'model_prefix': '"""nlu-"""'}), "(fingerprint=new_fingerprint, output_directory=output,\n train_path=_train_path, fixed_model_name=fixed_model_name, model_prefix\n ='nlu-')\n", (17807, 17950), False, 'from rasa import model\n'), ((1047, 1071), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (1069, 1071), False, 'import asyncio\n'), ((1084, 1112), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (1106, 1112), False, 'import asyncio\n'), ((2790, 2808), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2806, 2808), False, 'import tempfile\n'), ((8753, 8814), 'rasa.model.update_model_with_new_domain', 'model.update_model_with_new_domain', (['file_importer', 'train_path'], {}), '(file_importer, train_path)\n', (8787, 8814), False, 'from rasa import model\n'), ((13379, 13417), 'rasa.model.model_fingerprint', 'model.model_fingerprint', (['file_importer'], {}), '(file_importer)\n', (13402, 13417), False, 'from rasa import model\n'), ((17729, 17767), 'rasa.model.model_fingerprint', 'model.model_fingerprint', 
(['file_importer'], {}), '(file_importer)\n', (17752, 17767), False, 'from rasa import model\n'), ((12654, 12672), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12670, 12672), False, 'import tempfile\n'), ((13040, 13097), 'os.path.join', 'os.path.join', (['_train_path', 'DEFAULT_CORE_SUBDIRECTORY_NAME'], {}), '(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)\n', (13052, 13097), False, 'import os\n'), ((16816, 16834), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (16832, 16834), False, 'import tempfile\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
''' PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
from easyai.base_name.block_name import NormalizationType, ActivationType
from easyai.base_name.backbone_name import BackboneName
from easyai.model.backbone.utility.base_backbone import *
from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock
from easyai.model.base_block.cls.pnasnet_block import CellA, CellB
__all__ = ['pnasnet_A', 'pnasnet_B']
class PNASNet(BaseBackbone):
def __init__(self, data_channel=3, num_cells=6,
num_planes=44, block=CellA,
bnName=NormalizationType.BatchNormalize2d,
activationName=ActivationType.ReLU):
super().__init__()
self.set_name(BackboneName.PNASNetA)
self.data_channel = data_channel
self.num_cells = num_cells
self.block = block
self.activation_name = activationName
self.bn_name = bnName
self.first_output = num_planes
self.in_planes = self.first_output
self.create_block_list()
def create_block_list(self):
self.block_out_channels = []
self.index = 0
layer1 = ConvBNActivationBlock(in_channels=self.data_channel,
out_channels=self.first_output,
kernel_size=3,
stride=1,
padding=1,
bias=False,
bnName=self.bn_name,
activationName=self.activation_name)
self.add_block_list(layer1.get_name(), layer1, self.first_output)
self.make_layer(self.first_output, self.num_cells)
self.downsample(self.first_output * 2)
self.make_layer(self.first_output * 2, self.num_cells)
self.downsample(self.first_output * 4)
self.make_layer(self.first_output * 4, self.num_cells)
def make_layer(self, planes, num_cells):
for _ in range(num_cells):
temp_block = self.block(self.in_planes, planes, stride=1,
bn_name=self.bn_name, activation_name=self.activation_name)
self.add_block_list(temp_block.get_name(), temp_block, planes)
self.in_planes = planes
def downsample(self, planes):
down_block = self.block(self.in_planes, planes, stride=2,
bn_name=self.bn_name, activation_name=self.activation_name)
self.add_block_list(down_block.get_name(), down_block, planes)
self.in_planes = planes
def forward(self, x):
output_list = []
for block in self._modules.values():
x = block(x)
output_list.append(x)
return output_list
def pnasnet_A(data_channel):
model = PNASNet(data_channel=data_channel,
num_cells=6,
num_planes=44,
block=CellA)
model.set_name(BackboneName.PNASNetA)
return model
def pnasnet_B(data_channel):
model = PNASNet(data_channel=data_channel,
num_cells=6, num_planes=32,
block=CellB)
model.set_name(BackboneName.PNASNetB)
return model
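# --- Illustrative usage sketch (added for documentation; not part of the original
# easyai module). The input resolution is an assumption; `forward` returns one tensor
# per registered block, so the last entry is the final feature map.
if __name__ == "__main__":
    import torch

    backbone = pnasnet_A(data_channel=3)
    features = backbone(torch.randn(1, 3, 32, 32))
    print(len(features), features[-1].shape)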
| [
"easyai.model.base_block.utility.utility_block.ConvBNActivationBlock"
] | [((1225, 1425), 'easyai.model.base_block.utility.utility_block.ConvBNActivationBlock', 'ConvBNActivationBlock', ([], {'in_channels': 'self.data_channel', 'out_channels': 'self.first_output', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)', 'bnName': 'self.bn_name', 'activationName': 'self.activation_name'}), '(in_channels=self.data_channel, out_channels=self.\n first_output, kernel_size=3, stride=1, padding=1, bias=False, bnName=\n self.bn_name, activationName=self.activation_name)\n', (1246, 1425), False, 'from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock\n')] |
# -*- coding: utf-8 -*-
import json
import os
import math
import logging
import requests
import time
from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox
def get_access_token(token):
resp = None
request_count = 0
url = "https://api.cesium.com/v1/assets/1/endpoint"
while True:
if request_count > 4:
break
try:
request_count += 1
param = {'access_token': token}
resp = requests.get(url, params=param, timeout=2)
if resp.status_code != 200:
continue
break
except Exception as e:
resp = None
time.sleep(3)
if resp is None:
return None
resp_json = resp.json()
return resp_json.get('accessToken')
class TerrainDownloaderThread(BaseDownloaderThread):
URL = "https://assets.cesium.com/1/{z}/{x}/{y}.terrain?extensions=octvertexnormals-watermask&v=1.1.0"
def __init__(self, root_dir, bbox, token, task_q, logger=None, write_db=False):
super(TerrainDownloaderThread, self).__init__(
root_dir, bbox, task_q, logger, write_db=write_db, db_file_name='Terrain.db')
self.token = token
self._init_metadata(
format='terrain',
bounds='%f,%f,%f,%f' % (self.bbox.min_lng, self.bbox.min_lat, self.bbox.max_lng, self.bbox.max_lat))
def get_url(self, x, y, z):
return self.URL.format(x=x, y=y, z=z)
def _download(self, x, y, z):
file_path = '%s/%s/%i/%i/%i.%s' % (self.root_dir, 'Terrain', z, x, y, 'terrain')
if os.path.exists(file_path):
self._data2DB(x, y, z, file_path)
return 0
os.makedirs(os.path.dirname(file_path), exist_ok=True)
resp = None
        require_count = 0
_url = ''
access_token = get_access_token(self.token)
if access_token is None:
return -1
param = {'extensions': 'octvertexnormals-watermask', 'v': '1.1.0', 'access_token': access_token}
while True:
            if require_count > 4: break
try:
_url = self.get_url(x, y, z)
resp = requests.get(_url, params=param, stream=True, timeout=2)
break
except Exception as e:
resp = None
time.sleep(3)
            require_count += 1
if resp is None:
return -1
if resp.status_code != 200:
return -1
try:
with open(file_path, 'wb') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
except Exception as e:
return -1
self._data2DB(x, y, z, file_path)
return 1
class TerrainDownloadEngine(DownloadEngine):
root_dir = ''
def __init__(self, root_dir, bbox, token, thread_num, logger=None, write_db=False):
super(TerrainDownloadEngine, self).__init__(bbox, thread_num, logger, write_db=write_db)
self.root_dir = root_dir
self.token = token
def bbox2xyz(self, bbox, z):
min_x, min_y = latlng2tile_terrain(bbox.min_lat, bbox.min_lng, z)
max_x, max_y = latlng2tile_terrain(bbox.max_lat, bbox.max_lng, z)
return math.floor(min_x), math.floor(min_y), math.ceil(max_x) + 1, math.ceil(max_y) + 1
def generate_metadata(self):
try:
metadatas = {
"attribution": "© Analytical Graphics Inc., © CGIAR-CSI, Produced using Copernicus data and "
"information funded by the European Union - EU-DEM layers",
"available": [
[
{
"endX": 1,
"endY": 0,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 3,
"endY": 1,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 7,
"endY": 3,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 15,
"endY": 7,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 31,
"endY": 15,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 63,
"endY": 31,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 127,
"endY": 63,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 255,
"endY": 127,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 511,
"endY": 255,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 1023,
"endY": 511,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 2047,
"endY": 1023,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 4095,
"endY": 2047,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 8191,
"endY": 4095,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 16383,
"endY": 8191,
"startX": 0,
"startY": 0
}
],
[
{
"endX": 32767,
"endY": 16383,
"startX": 0,
"startY": 0
}
]
],
"bounds": [-180, -90, 180, 90, ],
"description": "STK World Terrain Premium Tileset, v1.3. 10m - 30m resolution CONUS, 30m resolution "
"SRTM between 60N and 60S, 30m Europe. Minimum global coverage of 1000m.",
"extensions": ["watermask", "vertexnormals", "octvertexnormals", ],
"format": "quantized-mesh-1.0",
"maxzoom": 13,
"minzoom": 0,
"name": "world",
"projection": "EPSG:4326",
"scheme": "tms",
"tilejson": "2.1.0",
"tiles": ["{z}/{x}/{y}.terrain?v={version}", ],
"version": "1.31376.0"
}
_dir = os.path.join(self.root_dir, 'Terrain')
os.makedirs(_dir, exist_ok=True)
metadatas_path = os.path.join(_dir, 'layer.json')
with open(metadatas_path, 'w') as f:
json.dump(metadatas, f)
except Exception as e:
if self.logger is not None:
self.logger.exception(e)
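    # --- Illustrative helper (added for documentation; not part of the original class):
    # the hard-coded "available" ranges above follow the EPSG:4326 two-root-tile layout,
    # i.e. at zoom z the x indices span [0, 2**(z + 1) - 1] and y spans [0, 2**z - 1].
    @staticmethod
    def _example_available_ranges(max_zoom=14):
        return [
            [{'startX': 0, 'startY': 0, 'endX': 2 ** (z + 1) - 1, 'endY': 2 ** z - 1}]
            for z in range(max_zoom + 1)
        ]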
def run(self):
try:
self.generate_metadata()
count = 0
bboxs = self.cut_bbox()
for bbox in bboxs:
_count = self.get_task_count(bbox)
count += _count
self.division_done_signal.emit(count)
for bbox in bboxs:
while True:
if not self.running:
time.sleep(0.01)
else:
break
task_q = self.get_task_queue(bbox)
self.threads = []
for i in range(self.thread_num):
thread = TerrainDownloaderThread(self.root_dir, self.bbox, self.token, task_q, self.logger,
write_db=self.write_db)
thread.sub_progressBar_updated_signal.connect(self.sub_update_progressBar)
self.threads.append(thread)
for thread in self.threads:
thread.start()
for thread in self.threads:
thread.wait()
for t in self.threads:
t.stop()
t.quit()
self.threads = []
self.download_done_signal.emit()
except Exception as e:
if self.logger is not None:
self.logger.error(e)
if __name__ == '__main__':
if 1:
logger = logging.getLogger('down')
try:
root = r'/Users/cugxy/Documents/data/downloader'
formatter = logging.Formatter('%(levelname)s-%(message)s')
hdlr = logging.StreamHandler()
log_file = os.path.join(root, 'down.log')
file_hdlr = logging.FileHandler(log_file)
file_hdlr.setFormatter(formatter)
logger.addHandler(file_hdlr)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
min_lng = -180.0
max_lng = 180.0
min_lat = -90.0
max_lat = 90.0
start_zoom = 0
end_zoom = 5
bbox = BoundBox(max_lat, max_lng, min_lat, min_lng, start_zoom, end_zoom)
            token = ''  # a valid Cesium ion access token must be supplied here
            d = TerrainDownloadEngine(root, bbox, token, 8, logger)
d.start()
time.sleep(10000)
logger.error('main thread out')
except Exception as e:
logger.error(e)
if 0:
        accessToken = get_access_token('')  # a real Cesium ion token would be needed here
pass
| [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"math.ceil",
"os.makedirs",
"map_download.cmd.BaseDownloader.latlng2tile_terrain",
"math.floor",
"logging.Formatter",
"map_download.cmd.BaseDownloader.BoundBox",
"os.path.join",
"requests.get",
"time.sleep",
"os.path.dirname",
"logging.FileHandler",
"json.dump"
] | [((1646, 1671), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1660, 1671), False, 'import os\n'), ((3201, 3251), 'map_download.cmd.BaseDownloader.latlng2tile_terrain', 'latlng2tile_terrain', (['bbox.min_lat', 'bbox.min_lng', 'z'], {}), '(bbox.min_lat, bbox.min_lng, z)\n', (3220, 3251), False, 'from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox\n'), ((3275, 3325), 'map_download.cmd.BaseDownloader.latlng2tile_terrain', 'latlng2tile_terrain', (['bbox.max_lat', 'bbox.max_lng', 'z'], {}), '(bbox.max_lat, bbox.max_lng, z)\n', (3294, 3325), False, 'from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox\n'), ((10233, 10258), 'logging.getLogger', 'logging.getLogger', (['"""down"""'], {}), "('down')\n", (10250, 10258), False, 'import logging\n'), ((526, 568), 'requests.get', 'requests.get', (['url'], {'params': 'param', 'timeout': '(2)'}), '(url, params=param, timeout=2)\n', (538, 568), False, 'import requests\n'), ((1760, 1786), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (1775, 1786), False, 'import os\n'), ((3341, 3358), 'math.floor', 'math.floor', (['min_x'], {}), '(min_x)\n', (3351, 3358), False, 'import math\n'), ((3360, 3377), 'math.floor', 'math.floor', (['min_y'], {}), '(min_y)\n', (3370, 3377), False, 'import math\n'), ((8434, 8472), 'os.path.join', 'os.path.join', (['self.root_dir', '"""Terrain"""'], {}), "(self.root_dir, 'Terrain')\n", (8446, 8472), False, 'import os\n'), ((8485, 8517), 'os.makedirs', 'os.makedirs', (['_dir'], {'exist_ok': '(True)'}), '(_dir, exist_ok=True)\n', (8496, 8517), False, 'import os\n'), ((8547, 8579), 'os.path.join', 'os.path.join', (['_dir', '"""layer.json"""'], {}), "(_dir, 'layer.json')\n", (8559, 8579), False, 'import os\n'), ((10357, 10403), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s-%(message)s"""'], {}), "('%(levelname)s-%(message)s')\n", (10374, 10403), False, 'import logging\n'), ((10423, 10446), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (10444, 10446), False, 'import logging\n'), ((10470, 10500), 'os.path.join', 'os.path.join', (['root', '"""down.log"""'], {}), "(root, 'down.log')\n", (10482, 10500), False, 'import os\n'), ((10525, 10554), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (10544, 10554), False, 'import logging\n'), ((10903, 10969), 'map_download.cmd.BaseDownloader.BoundBox', 'BoundBox', (['max_lat', 'max_lng', 'min_lat', 'min_lng', 'start_zoom', 'end_zoom'], {}), '(max_lat, max_lng, min_lat, min_lng, start_zoom, end_zoom)\n', (10911, 10969), False, 'from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox\n'), ((11065, 11082), 'time.sleep', 'time.sleep', (['(10000)'], {}), '(10000)\n', (11075, 11082), False, 'import time\n'), ((719, 732), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (729, 732), False, 'import time\n'), ((2222, 2278), 'requests.get', 'requests.get', (['_url'], {'params': 'param', 'stream': '(True)', 'timeout': '(2)'}), '(_url, params=param, stream=True, timeout=2)\n', (2234, 2278), False, 'import requests\n'), ((3379, 3395), 'math.ceil', 'math.ceil', (['max_x'], {}), '(max_x)\n', (3388, 3395), False, 'import math\n'), ((3401, 3417), 'math.ceil', 'math.ceil', (['max_y'], {}), '(max_y)\n', (3410, 3417), False, 'import math\n'), ((8645, 8668), 'json.dump', 'json.dump', (['metadatas', 'f'], {}), 
'(metadatas, f)\n', (8654, 8668), False, 'import json\n'), ((2380, 2393), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2390, 2393), False, 'import time\n'), ((9197, 9213), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (9207, 9213), False, 'import time\n')] |
"""Forms for RTD donations"""
import logging
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin
from readthedocs.payments.utils import stripe
from .models import Supporter
log = logging.getLogger(__name__)
class SupporterForm(StripeResourceMixin, StripeModelForm):
"""Donation support sign up form
This extends the basic payment form, giving fields for credit card number,
expiry, and CVV. The proper Knockout data bindings are established on
:py:class:`StripeModelForm`
"""
class Meta:
model = Supporter
fields = (
'last_4_digits',
'name',
'email',
'dollars',
'logo_url',
'site_url',
'public',
)
labels = {
'public': _('Make this donation public'),
}
help_texts = {
'public': _('Your name and image will be displayed on the donation page'),
'email': _('Your email is used for Gravatar and so we can send you a receipt'),
'logo_url': _("URL of your company's logo, images should be 300x300 pixels or less"),
'dollars': _('Companies donating over $400 can specify a logo URL and site link'),
}
widgets = {
'dollars': forms.HiddenInput(attrs={
'data-bind': 'value: dollars'
}),
'logo_url': forms.TextInput(attrs={
'data-bind': 'value: logo_url, enable: urls_enabled'
}),
'site_url': forms.TextInput(attrs={
'data-bind': 'value: site_url, enable: urls_enabled'
}),
'last_4_digits': forms.TextInput(attrs={
'data-bind': 'valueInit: card_digits, value: card_digits'
}),
}
last_4_digits = forms.CharField(widget=forms.HiddenInput(), required=True)
name = forms.CharField(required=True)
email = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(SupporterForm, self).__init__(*args, **kwargs)
def validate_stripe(self):
"""Call stripe for payment (not ideal here) and clean up logo < $200"""
dollars = self.cleaned_data['dollars']
if dollars < 200:
self.cleaned_data['logo_url'] = None
self.cleaned_data['site_url'] = None
stripe.Charge.create(
amount=int(self.cleaned_data['dollars']) * 100,
currency='usd',
source=self.cleaned_data['stripe_token'],
description='Read the Docs Sustained Engineering',
receipt_email=self.cleaned_data['email']
)
def save(self, commit=True):
supporter = super(SupporterForm, self).save(commit)
if commit and self.user is not None and self.user.is_authenticated():
supporter.user = self.user
supporter.save()
return supporter
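# --- Illustrative sketch (added for documentation; not part of the original module):
# driving the form from a hypothetical Django view, assuming the usual ModelForm flow.
# Validation is expected to run the Stripe charge via `validate_stripe` above.
def _example_handle_donation(request):
    form = SupporterForm(user=request.user, data=request.POST)
    if form.is_valid():
        return form.save()
    return None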
class EthicalAdForm(StripeResourceMixin, StripeModelForm):
"""Payment form for ethical ads
This extends the basic payment form, giving fields for credit card number,
expiry, and CVV. The proper Knockout data bindings are established on
:py:class:`StripeModelForm`
"""
class Meta:
model = Supporter
fields = (
'last_4_digits',
'name',
'email',
'dollars',
)
help_texts = {
'email': _('Your email is used so we can send you a receipt'),
}
widgets = {
'dollars': forms.HiddenInput(attrs={
'data-bind': 'value: dollars'
}),
'last_4_digits': forms.TextInput(attrs={
'data-bind': 'valueInit: card_digits, value: card_digits'
}),
}
last_4_digits = forms.CharField(widget=forms.HiddenInput(), required=True)
name = forms.CharField(required=True)
email = forms.CharField(required=True)
def validate_stripe(self):
stripe.Charge.create(
amount=int(self.cleaned_data['dollars']) * 100,
currency='usd',
source=self.cleaned_data['stripe_token'],
description='Read the Docs Sponsorship Payment',
receipt_email=self.cleaned_data['email']
)
| [
"logging.getLogger",
"django.forms.HiddenInput",
"django.utils.translation.ugettext_lazy",
"django.forms.CharField",
"django.forms.TextInput"
] | [((322, 349), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (339, 349), False, 'import logging\n'), ((2007, 2037), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)'}), '(required=True)\n', (2022, 2037), False, 'from django import forms\n'), ((2050, 2080), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)'}), '(required=True)\n', (2065, 2080), False, 'from django import forms\n'), ((4009, 4039), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)'}), '(required=True)\n', (4024, 4039), False, 'from django import forms\n'), ((4052, 4082), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)'}), '(required=True)\n', (4067, 4082), False, 'from django import forms\n'), ((919, 949), 'django.utils.translation.ugettext_lazy', '_', (['"""Make this donation public"""'], {}), "('Make this donation public')\n", (920, 949), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1006, 1069), 'django.utils.translation.ugettext_lazy', '_', (['"""Your name and image will be displayed on the donation page"""'], {}), "('Your name and image will be displayed on the donation page')\n", (1007, 1069), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1092, 1161), 'django.utils.translation.ugettext_lazy', '_', (['"""Your email is used for Gravatar and so we can send you a receipt"""'], {}), "('Your email is used for Gravatar and so we can send you a receipt')\n", (1093, 1161), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1187, 1259), 'django.utils.translation.ugettext_lazy', '_', (['"""URL of your company\'s logo, images should be 300x300 pixels or less"""'], {}), '("URL of your company\'s logo, images should be 300x300 pixels or less")\n', (1188, 1259), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1284, 1354), 'django.utils.translation.ugettext_lazy', '_', (['"""Companies donating over $400 can specify a logo URL and site link"""'], {}), "('Companies donating over $400 can specify a logo URL and site link')\n", (1285, 1354), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1409, 1465), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {'attrs': "{'data-bind': 'value: dollars'}"}), "(attrs={'data-bind': 'value: dollars'})\n", (1426, 1465), False, 'from django import forms\n'), ((1521, 1598), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'data-bind': 'value: logo_url, enable: urls_enabled'}"}), "(attrs={'data-bind': 'value: logo_url, enable: urls_enabled'})\n", (1536, 1598), False, 'from django import forms\n'), ((1654, 1731), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'data-bind': 'value: site_url, enable: urls_enabled'}"}), "(attrs={'data-bind': 'value: site_url, enable: urls_enabled'})\n", (1669, 1731), False, 'from django import forms\n'), ((1792, 1878), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'data-bind': 'valueInit: card_digits, value: card_digits'}"}), "(attrs={'data-bind':\n 'valueInit: card_digits, value: card_digits'})\n", (1807, 1878), False, 'from django import forms\n'), ((1960, 1979), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (1977, 1979), False, 'from django import forms\n'), ((3570, 3622), 'django.utils.translation.ugettext_lazy', '_', (['"""Your email is used so we can send you a receipt"""'], {}), "('Your email is used so we can send you a receipt')\n", (3571, 3622), True, 'from 
django.utils.translation import ugettext_lazy as _\n'), ((3677, 3733), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {'attrs': "{'data-bind': 'value: dollars'}"}), "(attrs={'data-bind': 'value: dollars'})\n", (3694, 3733), False, 'from django import forms\n'), ((3794, 3880), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'data-bind': 'valueInit: card_digits, value: card_digits'}"}), "(attrs={'data-bind':\n 'valueInit: card_digits, value: card_digits'})\n", (3809, 3880), False, 'from django import forms\n'), ((3962, 3981), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (3979, 3981), False, 'from django import forms\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import DataReaderBase
from ..tools import COL, _get_dates, to_float, to_int
import pandas as pd
#from pandas.tseries.frequencies import to_offset
from six.moves import cStringIO as StringIO
import logging
import traceback
import datetime
import json
import token, tokenize
def ymd_to_date(y, m, d):
"""
Returns date
>>> expiration = {u'd': 1, u'm': 12, u'y': 2014}
>>> ymd_to_date(**expiration)
datetime.date(2014, 12, 1)
>>> ymd_to_date(2014, 3, 1)
datetime.date(2014, 3, 1)
"""
return(datetime.date(year=y, month=m, day=d))
def date_to_ymd(date):
"""
Returns dict like {'y': ..., 'm': ..., 'd': ...}
>>> date_to_ymd(datetime.date(year=2010, month=1, day=3))
{'y': 2010, 'm': 1, 'd': 3}
"""
d = {
'y': date.year,
'm': date.month,
'd': date.day
}
return(d)
def fix_lazy_json(in_text):
"""
Handle lazy JSON - to fix expecting property name
this function fixes the json output from google
http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
"""
tokengen = tokenize.generate_tokens(StringIO(in_text).readline)
result = []
for tokid, tokval, _, _, _ in tokengen:
# fix unquoted strings
if (tokid == token.NAME):
if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
tokid = token.STRING
tokval = u'"%s"' % tokval
# fix single-quoted strings
elif (tokid == token.STRING):
if tokval.startswith ("'"):
tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')
# remove invalid commas
elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
if (len(result) > 0) and (result[-1][1] == ','):
result.pop()
# fix single-quoted strings
elif (tokid == token.STRING):
if tokval.startswith ("'"):
tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')
result.append((tokid, tokval))
return tokenize.untokenize(result)
def json_decode(json_string):
try:
ret = json.loads(json_string)
except:
json_string = fix_lazy_json(json_string)
ret = json.loads(json_string)
return ret
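# --- Illustrative sketch (added for documentation; not part of the original reader):
# exercising the lazy-JSON handling above on a hand-written example string.
def _example_lazy_json():
    lazy = "{a: 1, b: 'x',}"   # unquoted key, single quotes, trailing comma
    return json_decode(lazy)   # expected to yield {'a': 1, 'b': 'x'}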
class DataReaderGoogleFinanceOptions(DataReaderBase):
"""
DataReader to fetch data from Google Finance Options
see https://www.google.com/finance/option_chain
https://github.com/makmac213/python-google-option-chain
http://www.drtomstarke.com/index.php/option-chains-from-google-finance-api
"""
def init(self, *args, **kwargs):
self._get_multi = self._get_multi_todict
def _get_one(self, name, *args, **kwargs):
return(self._get_one_raw(name, 'All', 'json'))
def _get_one_raw(self, symbol, typ='All', output='json', y='2014', m='12', d='1'):
url = "https://www.google.com/finance/option_chain"
params = {
'q': symbol,
'type': typ,
'output': output,
}
data = self._get_content(url, params)
d = {}
lst = []
for typ in [u'puts', u'calls']:
df_typ = pd.DataFrame(data[typ])
df_typ['Type'] = typ
lst.append(df_typ)
del data[typ]
for i, expiration in enumerate(data['expirations']):
params = {
'q': symbol,
'output': output,
'expy': expiration['y'],
'expm': expiration['m'],
'expd': expiration['d'],
}
data = self._get_content(url, params)
for typ in [u'puts', u'calls']:
df_typ = pd.DataFrame(data[typ])
df_typ['Type'] = typ
lst.append(df_typ)
del data[typ]
df = pd.concat(lst, axis=0, ignore_index=True)
d_cols = {
"a": "Ask",
"b": "Bid",
"p": "Last",
"strike": "Strike",
"expiry": "Expiry",
"vol": "Volume",
"name": "Name"
}
df = df.rename(columns=d_cols)
"""
d_cols = {
"a": "ask",
"b": "bid",
"c": "change",
"cid": "identity code",
"cp": "cp"
"cs": change direction. "chg" = up, "chr" = down, "chg"?
"e": # I think this tells us something about what country where the stock is traded. "OPRA" means USA.
"expiry": expiration date for this option
"name": I don't know. I have never seen a value for this
"oi": open interest. How many of these are currently being held by others.
See, http://www.investopedia.com/terms/o/openinterest.asp
"p": price, last
"s": option code.
Basically, Stock Symbol + 7 if mini option + date + "C" or "P" + price
"strike": "strike price for this option"
"vol": "the volume of options traded."
}
"""
for col in ['Ask', 'Bid', 'c', 'cp', 'Last', 'Strike']:
df[col] = df[col].map(to_float)
for col in ['Volume', 'oi', 'cid']:
df[col] = df[col].map(to_int)
df['Expiry'] = pd.to_datetime(df['Expiry'])
data['options'] = df
data['underlying_id'] = int(data['underlying_id'])
data['expiry'] = ymd_to_date(**data['expiry'])
for i, expiration in enumerate(data['expirations']):
data['expirations'][i] = ymd_to_date(**expiration)
#for col in ['Volume']:
# df[col] = df[col].fillna(0)
#d = {}
#d["options"] = df
#return(d)
return(data)
def _get_content(self, url, params):
#response = requests.get(url, params=params)
response = self.session.get(url, params=params)
if response.status_code == 200:
content_json = response.text
data = json_decode(content_json)
return(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"json.loads",
"tokenize.untokenize",
"six.moves.cStringIO",
"doctest.testmod",
"datetime.date",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] | [((588, 625), 'datetime.date', 'datetime.date', ([], {'year': 'y', 'month': 'm', 'day': 'd'}), '(year=y, month=m, day=d)\n', (601, 625), False, 'import datetime\n'), ((2146, 2173), 'tokenize.untokenize', 'tokenize.untokenize', (['result'], {}), '(result)\n', (2165, 2173), False, 'import token, tokenize\n'), ((6240, 6257), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6255, 6257), False, 'import doctest\n'), ((2228, 2251), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (2238, 2251), False, 'import json\n'), ((3973, 4014), 'pandas.concat', 'pd.concat', (['lst'], {'axis': '(0)', 'ignore_index': '(True)'}), '(lst, axis=0, ignore_index=True)\n', (3982, 4014), True, 'import pandas as pd\n'), ((5409, 5437), 'pandas.to_datetime', 'pd.to_datetime', (["df['Expiry']"], {}), "(df['Expiry'])\n", (5423, 5437), True, 'import pandas as pd\n'), ((1207, 1224), 'six.moves.cStringIO', 'StringIO', (['in_text'], {}), '(in_text)\n', (1215, 1224), True, 'from six.moves import cStringIO as StringIO\n'), ((2327, 2350), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (2337, 2350), False, 'import json\n'), ((3274, 3297), 'pandas.DataFrame', 'pd.DataFrame', (['data[typ]'], {}), '(data[typ])\n', (3286, 3297), True, 'import pandas as pd\n'), ((3796, 3819), 'pandas.DataFrame', 'pd.DataFrame', (['data[typ]'], {}), '(data[typ])\n', (3808, 3819), True, 'import pandas as pd\n')] |
from django.db.models import Q
from django.shortcuts import render
from django.http import Http404
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Product, Category
from .serializers import ProductSerializer, CategorySerializer
class LatestProductsList(APIView):
def get(self, request, format=None):
products = Product.objects.all()[0:4]
        serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
class ProductDetail(APIView):
def get_object(self, category_slug, product_slug):
try:
return Product.objects.filter(category__slug=category_slug).get(slug=product_slug)
except Product.DoesNotExist:
raise Http404
    def get(self, request, category_slug, product_slug, format=None):
product = self.get_object(category_slug, product_slug)
serializer = ProductSerializer(product)
return Response(serializer.data)
class CategoryDetail(APIView):
def get_object(self, category_slug):
try:
return Category.objects.get(slug=category_slug)
except Category.DoesNotExist:
raise Http404
    def get(self, request, category_slug, format=None):
category = self.get_object(category_slug)
serializer = CategorySerializer(category)
return Response(serializer.data)
@api_view(['POST'])
def search(request):
query = request.data.get('query', '')
if query:
products = Product.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
else:
return Response({"products": []}) | [
"rest_framework.response.Response",
"rest_framework.decorators.api_view",
"django.db.models.Q"
] | [((1481, 1499), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1489, 1499), False, 'from rest_framework.decorators import api_view\n'), ((559, 584), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (567, 584), False, 'from rest_framework.response import Response\n'), ((1040, 1065), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1048, 1065), False, 'from rest_framework.response import Response\n'), ((1453, 1478), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1461, 1478), False, 'from rest_framework.response import Response\n'), ((1755, 1780), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1763, 1780), False, 'from rest_framework.response import Response\n'), ((1806, 1832), 'rest_framework.response.Response', 'Response', (["{'products': []}"], {}), "({'products': []})\n", (1814, 1832), False, 'from rest_framework.response import Response\n'), ((1620, 1644), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'query'}), '(name__icontains=query)\n', (1621, 1644), False, 'from django.db.models import Q\n'), ((1647, 1678), 'django.db.models.Q', 'Q', ([], {'description__icontains': 'query'}), '(description__icontains=query)\n', (1648, 1678), False, 'from django.db.models import Q\n')] |
##########################################################################
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import IECore
class TestBasicPreset( unittest.TestCase ) :
def testCopy( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
p = IECore.BasicPreset( testObj, testObj.parameters() )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
p2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testLoad( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetLoadTest", "basicPresetLoadTest-1.cob" ) )
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
def testSave( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
		# Save for the classLoader and check it's there; we test the 'loadability' later...
preset.save( savePath, "basicPresetTest" )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.cob" ) ) )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.py" ) ) )
		# save without the classLoader and check it's there
preset.save( savePath, "basicPresetTest", classLoadable=False )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest.cob" ) ) )
# reload
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest.cob" ) )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
preset2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
preset2.save( savePath, "basicPresetTest2", classLoadable=False )
#reload
p2 = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest2.cob" ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testClassLoader( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
preset.save( savePath, "basicPresetTestClassLoader" )
# make sure that no messages are emitted during loading
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
loader = IECore.ClassLoader( IECore.SearchPath( savePath ) )
p = loader.load( "basicPresetTestClassLoader" )()
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( isinstance( p, IECore.BasicPreset ) )
p.metadata()
def testClasses( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassParameter( "b", "", "IECORE_OP_PATHS", os.path.join( "maths", "multiply" ), 2 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertNotEqual( classes1[1:], classes2[1:] )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertEqual( classes1[1:], classes2[1:] )
def testClassVectors( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassVectorParameter( "b", "", "IECORE_OP_PATHS" ),
]
)
testObj.parameters()["b"].setClasses(
[
( "mult", os.path.join( "maths", "multiply" ), 2 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassVectorParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertNotEqual( classes1, classes2 )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertEqual( classes1, classes2 )
def testCompoundVectorParameter( self ) :
p = IECore.Parameterised( "test" )
p.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.CompoundVectorParameter(
"c",
"",
members = [
IECore.StringVectorParameter( "s", "", IECore.StringVectorData() ),
IECore.BoolVectorParameter( "b", "", IECore.BoolVectorData() ),
]
)
]
)
p["c"]["s"].setValue( IECore.StringVectorData( [ "1", "2", "3" ] ) )
p["c"]["b"].setValue( IECore.BoolVectorData( [ True, False, True ] ) )
v = p.parameters().getValue().copy()
preset = IECore.BasicPreset( p, p.parameters() )
self.assertTrue( preset.applicableTo( p, p.parameters() ) )
p.parameters().setValue( p.parameters().defaultValue )
self.assertNotEqual( p.parameters().getValue(), v )
preset( p, p.parameters() )
self.assertEqual( p.parameters().getValue(), v )
def tearDown( self ) :
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
paths = (
os.path.join( savePath, "basicPresetTest" ),
os.path.join( savePath, "basicPresetTest.cob" ),
os.path.join( savePath, "basicPresetTest2.cob" ),
os.path.join( savePath, "basicPresetTestClassLoader" ),
)
for p in paths :
if os.path.isdir( p ) :
shutil.rmtree( p )
elif os.path.isfile( p ) :
os.remove( p )
if __name__ == "__main__":
unittest.main()
| [
"IECore.SearchPath",
"IECore.Parameterised",
"IECore.CapturingMessageHandler",
"IECore.BoolVectorData",
"os.path.join",
"shutil.rmtree",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"IECore.ClassVectorParameter",
"IECore.FloatParameter",
"IECore.StringVectorData",
"unittest.main",
"IECore.BoolParameter",
"IECore.ClassParameter",
"os.remove"
] | [((11326, 11341), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11339, 11341), False, 'import unittest\n'), ((1977, 2019), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (1997, 2019), False, 'import IECore\n'), ((2174, 2216), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised2"""'], {}), "('testParameterised2')\n", (2194, 2216), False, 'import IECore\n'), ((3305, 3347), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (3325, 3347), False, 'import IECore\n'), ((3502, 3544), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (3522, 3544), False, 'import IECore\n'), ((3807, 3839), 'IECore.CapturingMessageHandler', 'IECore.CapturingMessageHandler', ([], {}), '()\n', (3837, 3839), False, 'import IECore\n'), ((4487, 4529), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (4507, 4529), False, 'import IECore\n'), ((4684, 4726), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (4704, 4726), False, 'import IECore\n'), ((6730, 6772), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (6750, 6772), False, 'import IECore\n'), ((7210, 7242), 'IECore.CapturingMessageHandler', 'IECore.CapturingMessageHandler', ([], {}), '()\n', (7240, 7242), False, 'import IECore\n'), ((7557, 7599), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (7577, 7599), False, 'import IECore\n'), ((7808, 7850), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised2"""'], {}), "('testParameterised2')\n", (7828, 7850), False, 'import IECore\n'), ((8669, 8711), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised1"""'], {}), "('testParameterised1')\n", (8689, 8711), False, 'import IECore\n'), ((9039, 9081), 'IECore.Parameterised', 'IECore.Parameterised', (['"""testParameterised2"""'], {}), "('testParameterised2')\n", (9059, 9081), False, 'import IECore\n'), ((9980, 10008), 'IECore.Parameterised', 'IECore.Parameterised', (['"""test"""'], {}), "('test')\n", (10000, 10008), False, 'import IECore\n'), ((5624, 5669), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest.cob"""'], {}), "(savePath, 'basicPresetTest.cob')\n", (5636, 5669), False, 'import os\n'), ((6309, 6355), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest2.cob"""'], {}), "(savePath, 'basicPresetTest2.cob')\n", (6321, 6355), False, 'import os\n'), ((10355, 10395), 'IECore.StringVectorData', 'IECore.StringVectorData', (["['1', '2', '3']"], {}), "(['1', '2', '3'])\n", (10378, 10395), False, 'import IECore\n'), ((10426, 10468), 'IECore.BoolVectorData', 'IECore.BoolVectorData', (['[True, False, True]'], {}), '([True, False, True])\n', (10447, 10468), False, 'import IECore\n'), ((10965, 11006), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest"""'], {}), "(savePath, 'basicPresetTest')\n", (10977, 11006), False, 'import os\n'), ((11013, 11058), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest.cob"""'], {}), "(savePath, 'basicPresetTest.cob')\n", (11025, 11058), False, 'import os\n'), ((11065, 11111), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest2.cob"""'], {}), "(savePath, 'basicPresetTest2.cob')\n", 
(11077, 11111), False, 'import os\n'), ((11118, 11170), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTestClassLoader"""'], {}), "(savePath, 'basicPresetTestClassLoader')\n", (11130, 11170), False, 'import os\n'), ((11204, 11220), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (11217, 11220), False, 'import os\n'), ((2069, 2104), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(True)'], {}), "('a', '', True)\n", (2089, 2104), False, 'import IECore\n'), ((2112, 2147), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""b"""', '""""""', '(1.0)'], {}), "('b', '', 1.0)\n", (2133, 2147), False, 'import IECore\n'), ((2267, 2303), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(False)'], {}), "('a', '', False)\n", (2287, 2303), False, 'import IECore\n'), ((2311, 2346), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""c"""', '""""""', '(0.0)'], {}), "('c', '', 0.0)\n", (2332, 2346), False, 'import IECore\n'), ((3397, 3432), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(True)'], {}), "('a', '', True)\n", (3417, 3432), False, 'import IECore\n'), ((3440, 3475), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""b"""', '""""""', '(1.0)'], {}), "('b', '', 1.0)\n", (3461, 3475), False, 'import IECore\n'), ((3595, 3631), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(False)'], {}), "('a', '', False)\n", (3615, 3631), False, 'import IECore\n'), ((3639, 3674), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""c"""', '""""""', '(0.0)'], {}), "('c', '', 0.0)\n", (3660, 3674), False, 'import IECore\n'), ((3732, 3757), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3747, 3757), False, 'import os\n'), ((3892, 3966), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetLoadTest"""', '"""basicPresetLoadTest-1.cob"""'], {}), "(savePath, 'basicPresetLoadTest', 'basicPresetLoadTest-1.cob')\n", (3904, 3966), False, 'import os\n'), ((4579, 4614), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(True)'], {}), "('a', '', True)\n", (4599, 4614), False, 'import IECore\n'), ((4622, 4657), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""b"""', '""""""', '(1.0)'], {}), "('b', '', 1.0)\n", (4643, 4657), False, 'import IECore\n'), ((4777, 4813), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(False)'], {}), "('a', '', False)\n", (4797, 4813), False, 'import IECore\n'), ((4821, 4856), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""c"""', '""""""', '(0.0)'], {}), "('c', '', 0.0)\n", (4842, 4856), False, 'import IECore\n'), ((4914, 4939), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4929, 4939), False, 'import os\n'), ((5199, 5265), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest"""', '"""basicPresetTest-1.cob"""'], {}), "(savePath, 'basicPresetTest', 'basicPresetTest-1.cob')\n", (5211, 5265), False, 'import os\n'), ((5307, 5372), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest"""', '"""basicPresetTest-1.py"""'], {}), "(savePath, 'basicPresetTest', 'basicPresetTest-1.py')\n", (5319, 5372), False, 'import os\n'), ((5534, 5579), 'os.path.join', 'os.path.join', (['savePath', '"""basicPresetTest.cob"""'], {}), "(savePath, 'basicPresetTest.cob')\n", (5546, 5579), False, 'import os\n'), ((6822, 6857), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(True)'], {}), "('a', '', True)\n", (6842, 6857), 
False, 'import IECore\n'), ((6865, 6900), 'IECore.FloatParameter', 'IECore.FloatParameter', (['"""b"""', '""""""', '(1.0)'], {}), "('b', '', 1.0)\n", (6886, 6900), False, 'import IECore\n'), ((6958, 6983), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6973, 6983), False, 'import os\n'), ((7300, 7327), 'IECore.SearchPath', 'IECore.SearchPath', (['savePath'], {}), '(savePath)\n', (7317, 7327), False, 'import IECore\n'), ((7649, 7684), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(True)'], {}), "('a', '', True)\n", (7669, 7684), False, 'import IECore\n'), ((7901, 7950), 'IECore.ClassParameter', 'IECore.ClassParameter', (['"""c"""', '""""""', '"""IECORE_OP_PATHS"""'], {}), "('c', '', 'IECORE_OP_PATHS')\n", (7922, 7950), False, 'import IECore\n'), ((8761, 8796), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(True)'], {}), "('a', '', True)\n", (8781, 8796), False, 'import IECore\n'), ((8804, 8859), 'IECore.ClassVectorParameter', 'IECore.ClassVectorParameter', (['"""b"""', '""""""', '"""IECORE_OP_PATHS"""'], {}), "('b', '', 'IECORE_OP_PATHS')\n", (8831, 8859), False, 'import IECore\n'), ((9132, 9187), 'IECore.ClassVectorParameter', 'IECore.ClassVectorParameter', (['"""c"""', '""""""', '"""IECORE_OP_PATHS"""'], {}), "('c', '', 'IECORE_OP_PATHS')\n", (9159, 9187), False, 'import IECore\n'), ((10052, 10088), 'IECore.BoolParameter', 'IECore.BoolParameter', (['"""a"""', '""""""', '(False)'], {}), "('a', '', False)\n", (10072, 10088), False, 'import IECore\n'), ((10895, 10920), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10910, 10920), False, 'import os\n'), ((11229, 11245), 'shutil.rmtree', 'shutil.rmtree', (['p'], {}), '(p)\n', (11242, 11245), False, 'import shutil\n'), ((11256, 11273), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (11270, 11273), False, 'import os\n'), ((7743, 7776), 'os.path.join', 'os.path.join', (['"""maths"""', '"""multiply"""'], {}), "('maths', 'multiply')\n", (7755, 7776), False, 'import os\n'), ((8932, 8965), 'os.path.join', 'os.path.join', (['"""maths"""', '"""multiply"""'], {}), "('maths', 'multiply')\n", (8944, 8965), False, 'import os\n'), ((11282, 11294), 'os.remove', 'os.remove', (['p'], {}), '(p)\n', (11291, 11294), False, 'import os\n'), ((10209, 10234), 'IECore.StringVectorData', 'IECore.StringVectorData', ([], {}), '()\n', (10232, 10234), False, 'import IECore\n'), ((10281, 10304), 'IECore.BoolVectorData', 'IECore.BoolVectorData', ([], {}), '()\n', (10302, 10304), False, 'import IECore\n')] |
from zeit.cms.i18n import MessageFactory as _
import zope.interface
import zope.schema
class IGlobalSettings(zope.interface.Interface):
"""Global CMS settings."""
default_year = zope.schema.Int(
title=_("Default year"),
min=1900,
max=2100)
default_volume = zope.schema.Int(
title=_("Default volume"),
min=1,
max=54)
def get_working_directory(template):
"""Return the collection which is the main working directory.
template:
Template which will be filled with year and volume. In
``template`` the placeholders $year and $volume will be replaced.
Example: 'online/$year/$volume/foo'
If the respective collection does not exist, it will be created before
returning it.
"""
| [
"zeit.cms.i18n.MessageFactory"
] | [((220, 237), 'zeit.cms.i18n.MessageFactory', '_', (['"""Default year"""'], {}), "('Default year')\n", (221, 237), True, 'from zeit.cms.i18n import MessageFactory as _\n'), ((328, 347), 'zeit.cms.i18n.MessageFactory', '_', (['"""Default volume"""'], {}), "('Default volume')\n", (329, 347), True, 'from zeit.cms.i18n import MessageFactory as _\n')] |
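The get_working_directory docstring above fixes the placeholder contract ($year and $volume are substituted into the template, e.g. 'online/$year/$volume/foo') but leaves the expansion to implementations. A minimal sketch of that substitution, for illustration only — the helper name and the 2024/7 values are invented here, and this is not the zeit.cms implementation:
from string import Template
def expand_working_directory(template, year, volume):
    # Fill the documented $year/$volume placeholders,
    # e.g. 'online/$year/$volume/foo' -> 'online/2024/7/foo'
    return Template(template).substitute(year=year, volume=volume)
print(expand_working_directory('online/$year/$volume/foo', 2024, 7))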
import imtreat
img = imtreat.imageManagerClass.openImageFunction("../images/soleil.png", 0)
img = imtreat.definedModesClass.detailEnhanceFunction(img)
imtreat.imageManagerClass.saveImageFunction("/Téléchargements/", "image_1", ".png", img)
| [
"imtreat.imageManagerClass.saveImageFunction",
"imtreat.imageManagerClass.openImageFunction",
"imtreat.definedModesClass.detailEnhanceFunction"
] | [((22, 92), 'imtreat.imageManagerClass.openImageFunction', 'imtreat.imageManagerClass.openImageFunction', (['"""../images/soleil.png"""', '(0)'], {}), "('../images/soleil.png', 0)\n", (65, 92), False, 'import imtreat\n'), ((100, 152), 'imtreat.definedModesClass.detailEnhanceFunction', 'imtreat.definedModesClass.detailEnhanceFunction', (['img'], {}), '(img)\n', (147, 152), False, 'import imtreat\n'), ((154, 246), 'imtreat.imageManagerClass.saveImageFunction', 'imtreat.imageManagerClass.saveImageFunction', (['"""/Téléchargements/"""', '"""image_1"""', '""".png"""', 'img'], {}), "('/Téléchargements/', 'image_1',\n '.png', img)\n", (197, 246), False, 'import imtreat\n')] |
import requests
words_list = requests.get("https://raw.githubusercontent.com/atebits/Words/master/Words/fr.txt").text
words_list = filter(lambda x: len(x) > 4, words_list.split('\n'))
path = input("Chemin d'écriture ? (words.txt) ")
if path == "":
path = "./words.txt"
with open(path, "w", encoding="utf-8") as file:
file.write('\n'.join(words_list)) | [
"requests.get"
] | [((30, 118), 'requests.get', 'requests.get', (['"""https://raw.githubusercontent.com/atebits/Words/master/Words/fr.txt"""'], {}), "(\n 'https://raw.githubusercontent.com/atebits/Words/master/Words/fr.txt')\n", (42, 118), False, 'import requests\n')] |
import unittest
from unittest import mock
import os
import subprocess
from testfixtures import TempDirectory
from simplegallery.upload.uploader_factory import get_uploader
class AWSUploaderTestCase(unittest.TestCase):
def test_no_location(self):
uploader = get_uploader('aws')
self.assertFalse(uploader.check_location(''))
@mock.patch('subprocess.run')
def test_upload_gallery(self, subprocess_run):
subprocess_run.return_value = subprocess.CompletedProcess([], returncode=0)
with TempDirectory() as tempdir:
# Setup mock file and uploader
tempdir.write('index.html', b'')
gallery_path = os.path.join(tempdir.path, 'index.html')
uploader = get_uploader('aws')
# Test upload to bucket
uploader.upload_gallery('s3://testbucket/path/', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
# Test upload to bucket without prefix
uploader.upload_gallery('testbucket/path/', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
# Test upload to bucket without trailing /
uploader.upload_gallery('s3://testbucket/path', gallery_path)
subprocess_run.assert_called_with(
['aws', 's3', 'sync', gallery_path, 's3://testbucket/path/', '--exclude', '.DS_Store'])
if __name__ == '__main__':
unittest.main()
| [
"testfixtures.TempDirectory",
"subprocess.CompletedProcess",
"os.path.join",
"unittest.main",
"unittest.mock.patch",
"simplegallery.upload.uploader_factory.get_uploader"
] | [((352, 380), 'unittest.mock.patch', 'mock.patch', (['"""subprocess.run"""'], {}), "('subprocess.run')\n", (362, 380), False, 'from unittest import mock\n'), ((1607, 1622), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1620, 1622), False, 'import unittest\n'), ((272, 291), 'simplegallery.upload.uploader_factory.get_uploader', 'get_uploader', (['"""aws"""'], {}), "('aws')\n", (284, 291), False, 'from simplegallery.upload.uploader_factory import get_uploader\n'), ((470, 515), 'subprocess.CompletedProcess', 'subprocess.CompletedProcess', (['[]'], {'returncode': '(0)'}), '([], returncode=0)\n', (497, 515), False, 'import subprocess\n'), ((530, 545), 'testfixtures.TempDirectory', 'TempDirectory', ([], {}), '()\n', (543, 545), False, 'from testfixtures import TempDirectory\n'), ((673, 713), 'os.path.join', 'os.path.join', (['tempdir.path', '"""index.html"""'], {}), "(tempdir.path, 'index.html')\n", (685, 713), False, 'import os\n'), ((737, 756), 'simplegallery.upload.uploader_factory.get_uploader', 'get_uploader', (['"""aws"""'], {}), "('aws')\n", (749, 756), False, 'from simplegallery.upload.uploader_factory import get_uploader\n')] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
log = logging.getLogger(__name__)
class SLURMEnvironment(ClusterEnvironment):
"""Cluster environment for training on a cluster managed by SLURM."""
@property
def creates_processes_externally(self) -> bool:
return True
@staticmethod
def detect() -> bool:
"""Returns ``True`` if the current process was launched on a SLURM cluster."""
return "SLURM_NTASKS" in os.environ
@property
def main_address(self) -> str:
# figure out the root node addr
slurm_nodelist = os.environ.get("SLURM_NODELIST")
if slurm_nodelist:
root_node = slurm_nodelist.split(" ")[0].split(",")[0]
else:
root_node = "127.0.0.1"
root_node = self.resolve_root_node_address(root_node)
os.environ["MASTER_ADDR"] = root_node
log.debug(f"MASTER_ADDR: {os.environ['MASTER_ADDR']}")
return root_node
@property
def main_port(self) -> int:
# -----------------------
# SLURM JOB = PORT number
# -----------------------
# this way every process knows what port to use
default_port = os.environ.get("SLURM_JOB_ID")
if default_port:
# use the last 4 numbers in the job id as the id
default_port = default_port[-4:]
# all ports should be in the 10k+ range
default_port = int(default_port) + 15000
else:
default_port = 12910
# -----------------------
# PORT NUMBER = MASTER_PORT
# -----------------------
# in case the user passed it in
if "MASTER_PORT" in os.environ:
default_port = os.environ["MASTER_PORT"]
else:
os.environ["MASTER_PORT"] = str(default_port)
log.debug(f"MASTER_PORT: {os.environ['MASTER_PORT']}")
return int(default_port)
def world_size(self) -> int:
return int(os.environ["SLURM_NTASKS"])
def set_world_size(self, size: int) -> None:
log.debug("SLURMEnvironment.set_world_size was called, but setting world size is not allowed. Ignored.")
def global_rank(self) -> int:
return int(os.environ["SLURM_PROCID"])
def set_global_rank(self, rank: int) -> None:
log.debug("SLURMEnvironment.set_global_rank was called, but setting global rank is not allowed. Ignored.")
def local_rank(self) -> int:
return int(os.environ["SLURM_LOCALID"])
def node_rank(self) -> int:
return int(os.environ["SLURM_NODEID"])
def resolve_root_node_address(self, root_node: str) -> str:
if "[" in root_node:
name, numbers = root_node.split("[", maxsplit=1)
number = numbers.split(",", maxsplit=1)[0]
if "-" in number:
number = number.split("-")[0]
number = re.sub("[^0-9]", "", number)
root_node = name + number
return root_node
| [
"logging.getLogger",
"re.sub",
"os.environ.get"
] | [((720, 747), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (737, 747), False, 'import logging\n'), ((1246, 1278), 'os.environ.get', 'os.environ.get', (['"""SLURM_NODELIST"""'], {}), "('SLURM_NODELIST')\n", (1260, 1278), False, 'import os\n'), ((1848, 1878), 'os.environ.get', 'os.environ.get', (['"""SLURM_JOB_ID"""'], {}), "('SLURM_JOB_ID')\n", (1862, 1878), False, 'import os\n'), ((3532, 3560), 're.sub', 're.sub', (['"""[^0-9]"""', '""""""', 'number'], {}), "('[^0-9]', '', number)\n", (3538, 3560), False, 'import re\n')] |
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.kullback_leiblers import gauss_kl
from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer
tf.keras.backend.set_floatx("float64")
############
# Utilities
############
def _zero_one_normal_prior(w_dim):
""" N(0, I) prior """
return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim))
def get_distributions_with_w_dim():
distributions = []
for d in [1, 5]:
mean = np.zeros(d)
scale_tri_l = np.eye(d)
mvn = tfp.distributions.MultivariateNormalTriL(mean, scale_tri_l)
std = np.ones(d)
mvn_diag = tfp.distributions.MultivariateNormalDiag(mean, std)
distributions.append((mvn, d))
distributions.append((mvn_diag, d))
return distributions
############
# Tests
############
@pytest.mark.parametrize("distribution, w_dim", get_distributions_with_w_dim())
def test_local_kls(distribution, w_dim):
lv = LatentVariableLayer(encoder=None, prior=distribution)
# test kl is 0 when posteriors == priors
posterior = distribution
assert lv._local_kls(posterior) == 0
# test kl > 0 when posteriors != priors
batch_size = 10
params = distribution.parameters
posterior_params = {
k: [v + 0.5 for _ in range(batch_size)]
for k, v in params.items()
if isinstance(v, np.ndarray)
}
posterior = lv.distribution_class(**posterior_params)
local_kls = lv._local_kls(posterior)
assert np.all(local_kls > 0)
assert local_kls.shape == (batch_size,)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_local_kl_gpflow_consistency(w_dim):
num_data = 400
means = np.random.randn(num_data, w_dim)
encoder = DirectlyParameterizedNormalDiag(num_data, w_dim, means)
lv = LatentVariableLayer(encoder=encoder, prior=_zero_one_normal_prior(w_dim))
posteriors = lv._inference_posteriors(
[np.random.randn(num_data, 3), np.random.randn(num_data, 2)]
)
q_mu = posteriors.parameters["loc"]
q_sqrt = posteriors.parameters["scale_diag"]
gpflow_local_kls = gauss_kl(q_mu, q_sqrt)
tfp_local_kls = tf.reduce_sum(lv._local_kls(posteriors))
np.testing.assert_allclose(tfp_local_kls, gpflow_local_kls, rtol=1e-10)
class ArrayMatcher:
def __init__(self, expected):
self.expected = expected
def __eq__(self, actual):
return np.allclose(actual, self.expected, equal_nan=True)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_latent_variable_layer_losses(mocker, w_dim):
num_data, x_dim, y_dim = 43, 3, 1
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(encoder=encoder, prior=prior)
inputs = np.full((num_data, x_dim), np.nan)
targets = np.full((num_data, y_dim), np.nan)
observations = [inputs, targets]
encoder_inputs = np.concatenate(observations, axis=-1)
_ = lv(inputs)
encoder.assert_not_called()
assert lv.losses == [0.0]
_ = lv(inputs, observations=observations, training=True)
# assert_called_once_with uses == for comparison which fails on arrays
encoder.assert_called_once_with(ArrayMatcher(encoder_inputs), training=True)
expected_loss = [tf.reduce_mean(posteriors.kl_divergence(prior))]
np.testing.assert_equal(lv.losses, expected_loss) # also checks shapes match
@pytest.mark.parametrize("w_dim", [1, 5])
@pytest.mark.parametrize("seed2", [None, 42])
def test_latent_variable_layer_samples(mocker, test_data, w_dim, seed2):
seed = 123
inputs, targets = test_data
num_data, x_dim = inputs.shape
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(prior=prior, encoder=encoder)
tf.random.set_seed(seed)
sample_prior = lv(inputs, seed=seed2)
tf.random.set_seed(seed)
prior_expected = np.concatenate([inputs, prior.sample(num_data, seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_prior, prior_expected)
tf.random.set_seed(seed)
sample_posterior = lv(inputs, observations=[inputs, targets], training=True, seed=seed2)
tf.random.set_seed(seed)
posterior_expected = np.concatenate([inputs, posteriors.sample(seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_posterior, posterior_expected)
def test_no_tensorflow_metaclass_overwritten():
"""
LayerWithObservations is a subclass of tf.keras.layers.Layer (via TrackableLayer);
this test ensures that TrackableLayer does not have a metaclass, and hence by adding
the ABCMeta to LayerWithObservations we are not accidentally removing some required
TensorFlow magic metaclass.
"""
assert LayerWithObservations.__bases__ == (TrackableLayer,)
assert type(TrackableLayer) is type
assert type(LayerWithObservations) is abc.ABCMeta
| [
"numpy.testing.assert_equal",
"tensorflow_probability.distributions.MultivariateNormalDiag",
"tensorflow_probability.distributions.MultivariateNormalTriL",
"numpy.testing.assert_allclose",
"numpy.concatenate",
"numpy.testing.assert_array_equal",
"numpy.eye",
"numpy.allclose",
"gpflux.encoders.DirectlyParameterizedNormalDiag",
"numpy.ones",
"tensorflow.keras.backend.set_floatx",
"numpy.random.randn",
"gpflux.layers.LatentVariableLayer",
"tensorflow.random.set_seed",
"gpflow.kullback_leiblers.gauss_kl",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.full",
"numpy.all"
] | [((897, 935), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (924, 935), True, 'import tensorflow as tf\n'), ((2330, 2370), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_dim"""', '[1, 5]'], {}), "('w_dim', [1, 5])\n", (2353, 2370), False, 'import pytest\n'), ((3216, 3256), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_dim"""', '[1, 5]'], {}), "('w_dim', [1, 5])\n", (3239, 3256), False, 'import pytest\n'), ((4542, 4582), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_dim"""', '[1, 5]'], {}), "('w_dim', [1, 5])\n", (4565, 4582), False, 'import pytest\n'), ((4584, 4628), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed2"""', '[None, 42]'], {}), "('seed2', [None, 42])\n", (4607, 4628), False, 'import pytest\n'), ((1728, 1781), 'gpflux.layers.LatentVariableLayer', 'LatentVariableLayer', ([], {'encoder': 'None', 'prior': 'distribution'}), '(encoder=None, prior=distribution)\n', (1747, 1781), False, 'from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer\n'), ((2261, 2282), 'numpy.all', 'np.all', (['(local_kls > 0)'], {}), '(local_kls > 0)\n', (2267, 2282), True, 'import numpy as np\n'), ((2447, 2479), 'numpy.random.randn', 'np.random.randn', (['num_data', 'w_dim'], {}), '(num_data, w_dim)\n', (2462, 2479), True, 'import numpy as np\n'), ((2494, 2549), 'gpflux.encoders.DirectlyParameterizedNormalDiag', 'DirectlyParameterizedNormalDiag', (['num_data', 'w_dim', 'means'], {}), '(num_data, w_dim, means)\n', (2525, 2549), False, 'from gpflux.encoders import DirectlyParameterizedNormalDiag\n'), ((2866, 2888), 'gpflow.kullback_leiblers.gauss_kl', 'gauss_kl', (['q_mu', 'q_sqrt'], {}), '(q_mu, q_sqrt)\n', (2874, 2888), False, 'from gpflow.kullback_leiblers import gauss_kl\n'), ((2955, 3026), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tfp_local_kls', 'gpflow_local_kls'], {'rtol': '(1e-10)'}), '(tfp_local_kls, gpflow_local_kls, rtol=1e-10)\n', (2981, 3026), True, 'import numpy as np\n'), ((3841, 3890), 'gpflux.layers.LatentVariableLayer', 'LatentVariableLayer', ([], {'encoder': 'encoder', 'prior': 'prior'}), '(encoder=encoder, prior=prior)\n', (3860, 3890), False, 'from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer\n'), ((3905, 3939), 'numpy.full', 'np.full', (['(num_data, x_dim)', 'np.nan'], {}), '((num_data, x_dim), np.nan)\n', (3912, 3939), True, 'import numpy as np\n'), ((3954, 3988), 'numpy.full', 'np.full', (['(num_data, y_dim)', 'np.nan'], {}), '((num_data, y_dim), np.nan)\n', (3961, 3988), True, 'import numpy as np\n'), ((4047, 4084), 'numpy.concatenate', 'np.concatenate', (['observations'], {'axis': '(-1)'}), '(observations, axis=-1)\n', (4061, 4084), True, 'import numpy as np\n'), ((4461, 4510), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['lv.losses', 'expected_loss'], {}), '(lv.losses, expected_loss)\n', (4484, 4510), True, 'import numpy as np\n'), ((5277, 5326), 'gpflux.layers.LatentVariableLayer', 'LatentVariableLayer', ([], {'prior': 'prior', 'encoder': 'encoder'}), '(prior=prior, encoder=encoder)\n', (5296, 5326), False, 'from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer\n'), ((5332, 5356), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5350, 5356), True, 'import tensorflow as tf\n'), ((5403, 5427), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5421, 5427), 
True, 'import tensorflow as tf\n'), ((5523, 5582), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sample_prior', 'prior_expected'], {}), '(sample_prior, prior_expected)\n', (5552, 5582), True, 'import numpy as np\n'), ((5588, 5612), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5606, 5612), True, 'import tensorflow as tf\n'), ((5710, 5734), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5728, 5734), True, 'import tensorflow as tf\n'), ((5829, 5896), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sample_posterior', 'posterior_expected'], {}), '(sample_posterior, posterior_expected)\n', (5858, 5896), True, 'import numpy as np\n'), ((1236, 1247), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (1244, 1247), True, 'import numpy as np\n'), ((1270, 1279), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (1276, 1279), True, 'import numpy as np\n'), ((1294, 1353), 'tensorflow_probability.distributions.MultivariateNormalTriL', 'tfp.distributions.MultivariateNormalTriL', (['mean', 'scale_tri_l'], {}), '(mean, scale_tri_l)\n', (1334, 1353), True, 'import tensorflow_probability as tfp\n'), ((1369, 1379), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (1376, 1379), True, 'import numpy as np\n'), ((1399, 1450), 'tensorflow_probability.distributions.MultivariateNormalDiag', 'tfp.distributions.MultivariateNormalDiag', (['mean', 'std'], {}), '(mean, std)\n', (1439, 1450), True, 'import tensorflow_probability as tfp\n'), ((3162, 3212), 'numpy.allclose', 'np.allclose', (['actual', 'self.expected'], {'equal_nan': '(True)'}), '(actual, self.expected, equal_nan=True)\n', (3173, 3212), True, 'import numpy as np\n'), ((1094, 1109), 'numpy.zeros', 'np.zeros', (['w_dim'], {}), '(w_dim)\n', (1102, 1109), True, 'import numpy as np\n'), ((1122, 1136), 'numpy.ones', 'np.ones', (['w_dim'], {}), '(w_dim)\n', (1129, 1136), True, 'import numpy as np\n'), ((2686, 2714), 'numpy.random.randn', 'np.random.randn', (['num_data', '(3)'], {}), '(num_data, 3)\n', (2701, 2714), True, 'import numpy as np\n'), ((2716, 2744), 'numpy.random.randn', 'np.random.randn', (['num_data', '(2)'], {}), '(num_data, 2)\n', (2731, 2744), True, 'import numpy as np\n'), ((3485, 3514), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (3500, 3514), True, 'import numpy as np\n'), ((3648, 3682), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (3663, 3682), True, 'import numpy as np\n'), ((4921, 4950), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (4936, 4950), True, 'import numpy as np\n'), ((5084, 5118), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (5099, 5118), True, 'import numpy as np\n'), ((3535, 3564), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (3550, 3564), True, 'import numpy as np\n'), ((3703, 3737), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (3718, 3737), True, 'import numpy as np\n'), ((4971, 5000), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (4986, 5000), True, 'import numpy as np\n'), ((5139, 5173), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (5154, 5173), True, 'import numpy as np\n')] |
from floodsystem.stationdata import build_station_list
from floodsystem.flood import stations_highest_rel_level
def run():
stations = build_station_list()
    warning_stations = stations_highest_rel_level(stations, 10)
for entry in warning_stations:
        print(entry[0].name, entry[1])
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run() | [
"floodsystem.stationdata.build_station_list",
"floodsystem.flood.stations_highest_rel_level"
] | [((139, 159), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (157, 159), False, 'from floodsystem.stationdata import build_station_list\n'), ((183, 223), 'floodsystem.flood.stations_highest_rel_level', 'stations_highest_rel_level', (['stations', '(10)'], {}), '(stations, 10)\n', (209, 223), False, 'from floodsystem.flood import stations_highest_rel_level\n')] |
import logging
import time
from datetime import timedelta
from typing import List
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
DEVICE_CLASS_MOTION
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from wyzeapy.base_client import Device, AccessTokenError
from wyzeapy.client import Client
from wyzeapy.types import PropertyIDs
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities):
_LOGGER.debug("""Creating new WyzeApi binary sensor component""")
client: Client = hass.data[DOMAIN][config_entry.entry_id]
def get_cameras() -> List[Device]:
try:
return client.get_cameras()
except AccessTokenError as e:
_LOGGER.warning(e)
client.reauthenticate()
return client.get_cameras()
cameras = [WyzeCameraMotion(client, camera) for camera in await hass.async_add_executor_job(get_cameras)]
async_add_entities(cameras, True)
class WyzeCameraMotion(BinarySensorEntity):
_on: bool
_available: bool
def __init__(self, wyzeapi_client: Client, device: Device):
self._client = wyzeapi_client
self._device = device
self._last_event = int(str(int(time.time())) + "000")
@property
def device_info(self):
return {
"identifiers": {
(DOMAIN, self._device.mac)
},
"name": self.name,
"manufacturer": "WyzeLabs",
"model": self._device.product_model
}
@property
def available(self) -> bool:
return self._available
@property
def name(self):
"""Return the display name of this switch."""
return self._device.nickname
@property
def is_on(self):
"""Return true if switch is on."""
return self._on
@property
def unique_id(self):
return "{}-motion".format(self._device.mac)
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"state": self.is_on,
"available": self.available,
"device model": self._device.product_model,
"mac": self.unique_id
}
@property
def device_class(self):
return DEVICE_CLASS_MOTION
def update(self):
try:
device_info = self._client.get_info(self._device)
except AccessTokenError:
self._client.reauthenticate()
device_info = self._client.get_info(self._device)
for property_id, value in device_info:
if property_id == PropertyIDs.AVAILABLE:
self._available = True if value == "1" else False
latest_event = self._client.get_latest_event(self._device)
if latest_event is not None:
if latest_event.event_ts > self._last_event:
self._on = True
self._last_event = latest_event.event_ts
else:
self._on = False
self._last_event = latest_event.event_ts
else:
self._on = False
| [
"logging.getLogger",
"datetime.timedelta",
"time.time"
] | [((500, 527), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (517, 527), False, 'import logging\n'), ((582, 603), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (591, 603), False, 'from datetime import timedelta\n'), ((1476, 1487), 'time.time', 'time.time', ([], {}), '()\n', (1485, 1487), False, 'import time\n')] |
"""
This script will modulate the blinky lights using the following algorithm:
1) uses user-provided location to obtain row of pixel data from bathy image
2) samples a number of pixels from that row equal to the number of LEDs
3) shifts the sampled row data to center it at the location specified by user
4) displays resulting pixels on Blinky Tape
5) shifts next row by a given latitude, also specified by user
6) sleeps for user-specified period of time
Uses the following arguments:
-l/--location: tuple
Location of the user in tuple(lat, lon). This represents the center of the LED strip. Defaults to (0, 0)
-u/--update-interval: int
Update interval of the script, in minutes. Defaults to 10.
-p/--port: str
Serial port of the BlinkyLight (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'.
-d/--delta_latitude: int
    Vertical change in latitude applied at every update. May be 0, but this will result in LEDs that never change.
-i/--image: str
Name of the PNG image that contains the color coded pathymetric data.
The file current named mapserv.png was obtained using the following API:
https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0
In lieu of providing command line arguments, you may alternatively edit the defaults in bath_config.json.
NOTE: runs via:
runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/')
(C) 2021 <NAME> (https://joeycodes.dev)
MIT Licensed
"""
import optparse
import json
from blinkytape import BlinkyTape
from time import sleep
from PIL import Image
import numpy as np
import sys
MAX_ERRORS = 3
num_errors = 0
# Obtain default parameters
with open("./bathymetry_blink/bathy_config.json") as f:
config = json.load(f)
# Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="portname",
help="serial port (ex: /dev/ttyACM0)", default=config["port"])
parser.add_option("-l", "--location", dest="location",
help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"])
parser.add_option("-u", "--update-rate", dest="update_rate",
help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"])
parser.add_option("-d", "--delta-latitude", dest="delta_latitude",
help="Change in latitude during update (ex: 5)", default=config["delta_latitude"])
parser.add_option("-n", "--num-leds", dest="num_leds",
help="Number of LEDs in strip (ex: 60)", default=config["num_leds"])
parser.add_option("-i", "--image", dest="image_name",
help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"])
(options, args) = parser.parse_args()
if args:
print("Unknown parameters: " + args)
# grab the values provided by user (or defaults); optparse returns command-line
# values as strings, so coerce them to the types the rest of the script expects
port = options.portname
loc = options.location
if isinstance(loc, str):
    # e.g. "-l 70,-110" arrives as the string "70,-110"
    loc = [float(x) for x in loc.split(",")]
else:
    loc = list(loc)
rate = float(options.update_rate)
delta = float(options.delta_latitude)
n_leds = int(options.num_leds)
i_name = options.image_name
# Some visual indication that it works, for headless setups (green tape)
bt = BlinkyTape(port, n_leds)
bt.displayColor(0, 100, 0)
bt.show()
sleep(2)
while True:
try:
# first, load image
im = Image.open(i_name) # Can be many different formats.
cols, rows = im.size
a = np.asarray(im) # of shape (rows, cols, channels)
# map loc latitude to 0-based index
latitude_index = min(rows - 1, max(0, (int)(((loc[0] - -90) / (90 - -90)) * (rows - 0) + 0)))
longitude_index = min(cols - 1, max(0, (int)(((loc[1] - -180) / (180 - -180)) * (cols - 0) + 0)))
# update the location of the next row of elevation data to take
loc[0] += delta
loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow
print("Lat index: " + str(latitude_index))
print("Lon index: " + str(longitude_index))
print("Next latitude: " + str(loc[0]))
# grab the applicable pixel indices
indices = [(int)(x*(cols/n_leds)) for x in range(n_leds)]
# sample that row of pixel data
output_pixels = np.take(a[latitude_index], indices, axis=0)
# rotate the row to center around the specified longitude
output_pixels = np.roll(output_pixels, longitude_index, axis=0)
# send all pixel data to bt
for pixel in output_pixels:
print("Sending r: {}, g: {}, b: {}".format(*pixel))
bt.sendPixel(*pixel)
# finally, show the image
bt.show()
# delete variables for memory management
del a
del im
# Tape resets to stored pattern after a few seconds of inactivity
sleep(rate * 60) # Wait specified number of minutes
# sleep(10) # Wait specified number of minutes
except KeyboardInterrupt:
print("Keyboard interrupt, ending program.")
sys.exit()
except RuntimeError as e:
print("Encountered runtime error: " + e.args[0])
# flush any incomplete data
bt.show()
num_errors += 1
if num_errors > MAX_ERRORS:
sys.exit("Error count exceeds that allowed.")
| [
"PIL.Image.open",
"numpy.roll",
"numpy.asarray",
"optparse.OptionParser",
"time.sleep",
"numpy.take",
"blinkytape.BlinkyTape",
"sys.exit",
"json.load"
] | [((1945, 1968), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (1966, 1968), False, 'import optparse\n'), ((3304, 3328), 'blinkytape.BlinkyTape', 'BlinkyTape', (['port', 'n_leds'], {}), '(port, n_leds)\n', (3314, 3328), False, 'from blinkytape import BlinkyTape\n'), ((3366, 3374), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (3371, 3374), False, 'from time import sleep\n'), ((1863, 1875), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1872, 1875), False, 'import json\n'), ((3438, 3456), 'PIL.Image.open', 'Image.open', (['i_name'], {}), '(i_name)\n', (3448, 3456), False, 'from PIL import Image\n'), ((3532, 3546), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (3542, 3546), True, 'import numpy as np\n'), ((4371, 4414), 'numpy.take', 'np.take', (['a[latitude_index]', 'indices'], {'axis': '(0)'}), '(a[latitude_index], indices, axis=0)\n', (4378, 4414), True, 'import numpy as np\n'), ((4514, 4561), 'numpy.roll', 'np.roll', (['output_pixels', 'longitude_index'], {'axis': '(0)'}), '(output_pixels, longitude_index, axis=0)\n', (4521, 4561), True, 'import numpy as np\n'), ((4971, 4987), 'time.sleep', 'sleep', (['(rate * 60)'], {}), '(rate * 60)\n', (4976, 4987), False, 'from time import sleep\n'), ((5172, 5182), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5180, 5182), False, 'import sys\n'), ((5423, 5468), 'sys.exit', 'sys.exit', (['"""Error count exceeds that allowed."""'], {}), "('Error count exceeds that allowed.')\n", (5431, 5468), False, 'import sys\n')] |
# pylint: disable=protected-access
import os
import re
import pytest
from dagster import file_relative_path
from dagster.core.errors import DagsterInstanceMigrationRequired
from dagster.core.instance import DagsterInstance, InstanceRef
from dagster.utils.test import restore_directory
# test that we can load runs and events from an old instance
def test_0_6_4():
test_dir = file_relative_path(__file__, 'snapshot_0_6_4')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'c7a6c4d7-6c88-46d0-8baa-d4937c3cefe5). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
for run in runs:
instance.all_logs(run.run_id)
def test_0_6_6_sqlite_exc():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
# Note that this is a deliberate choice -- old runs are simply invisible, and their
# presence won't raise DagsterInstanceMigrationRequired. This is a reasonable choice since
# the runs.db has moved and otherwise we would have to do a check for the existence of an
# old runs.db every time we accessed the runs. Instead, we'll do this only in the upgrade
# method.
assert len(runs) == 0
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'89296095-892d-4a15-aa0d-9018d1580945). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
def test_0_6_6_sqlite_migrate():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
instance.upgrade()
runs = instance.get_runs()
assert len(runs) == 1
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
| [
"dagster.file_relative_path",
"dagster.core.instance.InstanceRef.from_dir",
"dagster.utils.test.restore_directory",
"re.escape"
] | [((383, 429), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_4"""'], {}), "(__file__, 'snapshot_0_6_4')\n", (401, 429), False, 'from dagster import file_relative_path\n'), ((1092, 1145), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_6/sqlite"""'], {}), "(__file__, 'snapshot_0_6_6/sqlite')\n", (1110, 1145), False, 'from dagster import file_relative_path\n'), ((2393, 2446), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_6/sqlite"""'], {}), "(__file__, 'snapshot_0_6_6/sqlite')\n", (2411, 2446), False, 'from dagster import file_relative_path\n'), ((439, 466), 'dagster.utils.test.restore_directory', 'restore_directory', (['test_dir'], {}), '(test_dir)\n', (456, 466), False, 'from dagster.utils.test import restore_directory\n'), ((1155, 1182), 'dagster.utils.test.restore_directory', 'restore_directory', (['test_dir'], {}), '(test_dir)\n', (1172, 1182), False, 'from dagster.utils.test import restore_directory\n'), ((2473, 2534), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_6/sqlite/runs.db"""'], {}), "(__file__, 'snapshot_0_6_6/sqlite/runs.db')\n", (2491, 2534), False, 'from dagster import file_relative_path\n'), ((2647, 2674), 'dagster.utils.test.restore_directory', 'restore_directory', (['test_dir'], {}), '(test_dir)\n', (2664, 2674), False, 'from dagster.utils.test import restore_directory\n'), ((512, 542), 'dagster.core.instance.InstanceRef.from_dir', 'InstanceRef.from_dir', (['test_dir'], {}), '(test_dir)\n', (532, 542), False, 'from dagster.core.instance import DagsterInstance, InstanceRef\n'), ((1228, 1258), 'dagster.core.instance.InstanceRef.from_dir', 'InstanceRef.from_dir', (['test_dir'], {}), '(test_dir)\n', (1248, 1258), False, 'from dagster.core.instance import DagsterInstance, InstanceRef\n'), ((2566, 2635), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_6/sqlite/history/runs.db"""'], {}), "(__file__, 'snapshot_0_6_6/sqlite/history/runs.db')\n", (2584, 2635), False, 'from dagster import file_relative_path\n'), ((2720, 2750), 'dagster.core.instance.InstanceRef.from_dir', 'InstanceRef.from_dir', (['test_dir'], {}), '(test_dir)\n', (2740, 2750), False, 'from dagster.core.instance import DagsterInstance, InstanceRef\n'), ((3191, 3260), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_6/sqlite/history/runs.db"""'], {}), "(__file__, 'snapshot_0_6_6/sqlite/history/runs.db')\n", (3209, 3260), False, 'from dagster import file_relative_path\n'), ((3098, 3159), 'dagster.file_relative_path', 'file_relative_path', (['__file__', '"""snapshot_0_6_6/sqlite/runs.db"""'], {}), "(__file__, 'snapshot_0_6_6/sqlite/runs.db')\n", (3116, 3159), False, 'from dagster import file_relative_path\n'), ((672, 900), 're.escape', 're.escape', (['"""Instance is out of date and must be migrated (SqliteEventLogStorage for run c7a6c4d7-6c88-46d0-8baa-d4937c3cefe5). Database is at revision None, head is 567bc23fd1ac. Please run `dagster instance migrate`."""'], {}), "(\n 'Instance is out of date and must be migrated (SqliteEventLogStorage for run c7a6c4d7-6c88-46d0-8baa-d4937c3cefe5). Database is at revision None, head is 567bc23fd1ac. Please run `dagster instance migrate`.'\n )\n", (681, 900), False, 'import re\n'), ((1951, 2179), 're.escape', 're.escape', (['"""Instance is out of date and must be migrated (SqliteEventLogStorage for run 89296095-892d-4a15-aa0d-9018d1580945). 
Database is at revision None, head is 567bc23fd1ac. Please run `dagster instance migrate`."""'], {}), "(\n 'Instance is out of date and must be migrated (SqliteEventLogStorage for run 89296095-892d-4a15-aa0d-9018d1580945). Database is at revision None, head is 567bc23fd1ac. Please run `dagster instance migrate`.'\n )\n", (1960, 2179), False, 'import re\n')] |
import matplotlib.pyplot as plt
import pandas as pd
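# Note: the plt.savefig() calls below assume the ../charts/ output directory
# already exists; matplotlib does not create missing directories.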
def group_by_category(df):
grouped = df.groupby(['CATEGORY']).size().to_frame('Crimes')
labels = ['Trespassing', 'Vehicle theft', 'General Theft',
'Damage to Property', 'Robbery', 'Homicide']
p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Category')
p.get_legend().remove()
plt.savefig('../charts/category.png')
def group_by_time_of_day(df):
grouped = df.groupby(['TIME_OF_DAY']).size().to_frame('Crimes')
p = grouped.plot.pie(y='Crimes', labels=['Day', 'Evening', 'Night'], autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Time of Day')
p.get_legend().remove()
plt.savefig('../charts/time_of_day.png')
def group_by_day_of_the_week(df):
grouped = df.groupby(['DAY_OF_THE_WEEK']).size().to_frame('Crimes')
labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Day of The Week')
p.get_legend().remove()
plt.savefig('../charts/day_of_the_week.png')
def group_by_month(df):
grouped = df.groupby(['MONTH']).size().to_frame('Size')
grouped['Percentage'] = 100 * grouped['Size'] / len(df)
grouped = grouped.drop(columns='Size')
p = grouped.plot.bar()
p.set_title('Crimes Percentage Grouped By Month')
p.set_ylabel('Percentage of Crimes')
p.set_xlabel('Month')
p.get_legend().remove()
plt.savefig('../charts/month.png')
def group_by_year(df):
grouped = df.groupby(['YEAR']).size().to_frame('Crimes')
p = grouped.plot.pie(y='Crimes', autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Year')
p.get_legend().remove()
plt.savefig('../charts/year.png')
def group_by_territory(df):
grouped = df.groupby(['PDQ']).size().to_frame('Size')
grouped['Percentage'] = 100 * grouped['Size'] / len(df)
grouped = grouped.drop(columns='Size')
grouped.index = grouped.index.astype(int)
p = grouped.plot.bar()
p.set_title('Crimes Percentage Grouped By Territory')
p.set_ylabel('Percentage of Crimes')
p.set_xlabel('Territory Number')
p.get_legend().remove()
plt.savefig('../charts/territory.png')
if __name__ == '__main__':
df = pd.read_csv('../data/crimes_dataset_processed_incomplete.csv')
group_by_territory(df)
group_by_year(df)
group_by_month(df)
group_by_time_of_day(df)
group_by_day_of_the_week(df)
group_by_category(df)
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv"
] | [((428, 465), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../charts/category.png"""'], {}), "('../charts/category.png')\n", (439, 465), True, 'import matplotlib.pyplot as plt\n'), ((749, 789), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../charts/time_of_day.png"""'], {}), "('../charts/time_of_day.png')\n", (760, 789), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1200), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../charts/day_of_the_week.png"""'], {}), "('../charts/day_of_the_week.png')\n", (1167, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1603), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../charts/month.png"""'], {}), "('../charts/month.png')\n", (1580, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1863), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../charts/year.png"""'], {}), "('../charts/year.png')\n", (1841, 1863), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../charts/territory.png"""'], {}), "('../charts/territory.png')\n", (2306, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2434), 'pandas.read_csv', 'pd.read_csv', (['"""../data/crimes_dataset_processed_incomplete.csv"""'], {}), "('../data/crimes_dataset_processed_incomplete.csv')\n", (2383, 2434), True, 'import pandas as pd\n')] |
# Unit tests
import unittest
def run_test_instance(unittestinstance, profile, committeesize, tests):
import rules_approval
# all rules used?
for rule in rules_approval.MWRULES:
unittestinstance.assertTrue(rule in tests.keys())
for rule in tests.keys():
output = rules_approval.compute_rule(rule, profile,
committeesize,
resolute=False)
unittestinstance.assertEqual(
output, tests[rule], msg=rules_approval.MWRULES[rule] + " failed")
output = rules_approval.compute_rule(
rule, profile, committeesize, resolute=True)
unittestinstance.assertEqual(
len(output), 1,
msg=rules_approval.MWRULES[rule] + " failed with resolute=True")
unittestinstance.assertTrue(
output[0] in tests[rule],
msg=rules_approval.MWRULES[rule] + " failed with resolute=True")
class TestApprovalMultiwinner(unittest.TestCase):
def test_createprofiles(self):
from preferences import Profile
from preferences import DichotomousPreferences
num_cand = 7
prof = Profile(num_cand)
self.assertEqual(prof.add_preferences(
DichotomousPreferences([0, 4, 5])),
None)
with self.assertRaises(Exception):
prof.add_preferences(DichotomousPreferences([num_cand]))
with self.assertRaises(Exception):
prof.add_preferences(DichotomousPreferences([-1]))
self.assertEqual(prof.add_preferences([0, 4, 5]), None)
with self.assertRaises(Exception):
prof.add_preferences([0, 4, 5, "1"])
with self.assertRaises(Exception):
prof.add_preferences(["1", 0, 4, 5])
p1 = DichotomousPreferences([0, 4, 5])
p2 = DichotomousPreferences([1, 2])
self.assertEqual(prof.add_preferences([p1, p2]), None)
self.assertTrue(prof.has_unit_weights())
prof.add_preferences(DichotomousPreferences([0, 4, 5], 2.4))
self.assertFalse(prof.has_unit_weights())
self.assertEqual(prof.totalweight(), 6.4)
def test_mwrules__toofewcandidates(self):
from preferences import Profile
import rules_approval
profile = Profile(5)
committeesize = 4
preflist = [[0, 1, 2], [1], [1, 2], [0]]
profile.add_preferences(preflist)
for rule in rules_approval.MWRULES.keys():
with self.assertRaises(Exception):
rules_approval.compute_rule(rule, profile, committeesize)
with self.assertRaises(Exception):
rules_approval.compute_rule(rule, profile,
committeesize, resolute=True)
def test_mwrules_weightsconsidered(self):
from preferences import Profile
from preferences import DichotomousPreferences
import rules_approval
self.longMessage = True
profile = Profile(3)
profile.add_preferences(DichotomousPreferences([0]))
profile.add_preferences(DichotomousPreferences([0]))
profile.add_preferences(DichotomousPreferences([1], 5))
profile.add_preferences(DichotomousPreferences([0]))
committeesize = 1
for rule in rules_approval.MWRULES.keys():
if "monroe" in rule or "rule-x" in rule:
# Monroe and rule x only work with unit weights:
continue
result = rules_approval.compute_rule(rule, profile, committeesize)
self.assertTrue([1] in result,
msg=rule + " failed"+str(result))
def test_mwrules_correct_simple(self):
from preferences import Profile
import rules_approval
self.longMessage = True
profile = Profile(4)
profile.add_preferences([[0], [1], [2], [3]])
committeesize = 2
for rule in rules_approval.MWRULES.keys():
if rule == "greedy-monroe": # always returns one committee
continue
self.assertEqual(len(rules_approval.compute_rule(rule, profile,
committeesize)),
6, msg=rule + " failed")
for rule in rules_approval.MWRULES.keys():
self.assertEqual(len(rules_approval.compute_rule(rule, profile,
committeesize,
resolute=True)),
1, msg=rule + " failed with resolute=True")
def test_monroe_indivisible(self):
from preferences import Profile
import rules_approval
self.longMessage = True
profile = Profile(4)
profile.add_preferences([[0], [0], [0], [1, 2], [1, 2], [1], [3]])
committeesize = 3
for ilp in [True, False]:
# max Monroe score is 6 (even for committee [0, 1, 3])
self.assertEqual(
rules_approval.compute_monroe(profile, committeesize,
ilp=ilp, resolute=False),
[[0, 1, 2], [0, 1, 3], [0, 2, 3]])
# this test shows that tiebreaking is not (yet)
# implemented for opt-Phragmen
def test_optphrag_notiebreaking(self):
from preferences import Profile
from rules_approval import compute_rule
self.longMessage = True
profile = Profile(6)
profile.add_preferences([[0], [0], [1, 3], [1, 3], [1, 4],
[2, 4], [2, 5], [2, 5]])
committeesize = 3
self.assertEqual(
len(compute_rule("optphrag", profile, committeesize,
resolute=False)),
12)
def test_mwrules_correct_advanced_1(self):
from preferences import Profile
self.longMessage = True
committeesize = 4
profile = Profile(6)
preflist = [[0, 4, 5], [0], [1, 4, 5], [1],
[2, 4, 5], [2], [3, 4, 5], [3]]
profile.add_preferences(preflist)
tests1 = {
"seqpav": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"av": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"sav": [[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5], [0, 1, 3, 4],
[0, 1, 3, 5], [0, 1, 4, 5], [0, 2, 3, 4], [0, 2, 3, 5],
[0, 2, 4, 5], [0, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"pav-ilp": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"pav-noilp": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"revseqpav": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"minimaxav-noilp": [[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5], [0, 1, 4, 5],
[0, 2, 3, 4], [0, 2, 3, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"minimaxav-ilp": [[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5], [0, 1, 4, 5],
[0, 2, 3, 4], [0, 2, 3, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"phrag": [[0, 1, 4, 5], [0, 2, 4, 5], [0, 3, 4, 5],
[1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]],
"optphrag": [[0, 1, 2, 3]],
"cc-ilp": [[0, 1, 2, 3]],
"cc-noilp": [[0, 1, 2, 3]],
"seqcc": [[0, 1, 2, 4], [0, 1, 2, 5], [0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5], [1, 2, 3, 4], [1, 2, 3, 5]],
"revseqcc": [[0, 1, 2, 3]],
"monroe-ilp": [[0, 1, 2, 3]],
"monroe-noilp": [[0, 1, 2, 3]],
"greedy-monroe": [[0, 2, 3, 4]],
"slav-ilp": [[0, 1, 2, 3],
[0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5],
[1, 2, 3, 4], [1, 2, 3, 5]],
"slav-noilp": [[0, 1, 2, 3],
[0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5],
[1, 2, 3, 4], [1, 2, 3, 5]],
"seqslav": [[0, 1, 2, 4], [0, 1, 2, 5],
[0, 1, 3, 4], [0, 1, 3, 5],
[0, 2, 3, 4], [0, 2, 3, 5],
[1, 2, 3, 4], [1, 2, 3, 5]],
"rule-x": [[0, 1, 4, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 4, 5],
[1, 3, 4, 5], [2, 3, 4, 5]],
"phragmen-enestroem": [[0, 1, 4, 5], [0, 2, 4, 5],
[0, 3, 4, 5], [1, 2, 4, 5],
[1, 3, 4, 5], [2, 3, 4, 5]],
}
run_test_instance(self, profile, committeesize, tests1)
# and now with reversed preflist
preflist.reverse()
for p in preflist:
p.reverse()
profile = Profile(6)
profile.add_preferences(preflist)
run_test_instance(self, profile, committeesize, tests1)
def test_mwrules_correct_advanced_2(self):
from preferences import Profile
self.longMessage = True
# and another profile
profile = Profile(5)
committeesize = 3
preflist = [[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2],
[0, 1, 2], [0, 1], [3, 4], [3, 4], [3]]
profile.add_preferences(preflist)
tests2 = {
"seqpav": [[0, 1, 3]],
"av": [[0, 1, 2]],
"sav": [[0, 1, 3]],
"pav-ilp": [[0, 1, 3]],
"pav-noilp": [[0, 1, 3]],
"revseqpav": [[0, 1, 3]],
"minimaxav-noilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"minimaxav-ilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"phrag": [[0, 1, 3]],
"optphrag": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"cc-ilp": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"cc-noilp": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"seqcc": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"revseqcc": [[0, 1, 3], [0, 2, 3], [0, 3, 4],
[1, 2, 3], [1, 3, 4]],
"monroe-ilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"monroe-noilp": [[0, 1, 3], [0, 2, 3], [1, 2, 3]],
"greedy-monroe": [[0, 1, 3]],
"seqslav": [[0, 1, 3]],
"slav-ilp": [[0, 1, 3]],
"slav-noilp": [[0, 1, 3]],
"rule-x": [[0, 1, 3]],
"phragmen-enestroem": [[0, 1, 3]],
}
run_test_instance(self, profile, committeesize, tests2)
def test_mwrules_correct_advanced_3(self):
from preferences import Profile
self.longMessage = True
# and a third profile
profile = Profile(6)
committeesize = 4
preflist = [[0, 3, 4, 5], [1, 2], [0, 2, 5], [2],
[0, 1, 2, 3, 4], [0, 3, 4], [0, 2, 4], [0, 1]]
profile.add_preferences(preflist)
tests3 = {
"seqpav": [[0, 1, 2, 4]],
"av": [[0, 1, 2, 4], [0, 2, 3, 4]],
"sav": [[0, 1, 2, 4]],
"pav-ilp": [[0, 1, 2, 4]],
"pav-noilp": [[0, 1, 2, 4]],
"revseqpav": [[0, 1, 2, 4]],
"minimaxav-noilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 2, 3, 4], [0, 2, 3, 5],
[0, 2, 4, 5]],
"minimaxav-ilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 2, 3, 4], [0, 2, 3, 5],
[0, 2, 4, 5]],
"phrag": [[0, 1, 2, 4]],
"optphrag": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"cc-ilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"cc-noilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"seqcc": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5]],
"revseqcc": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"monroe-ilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"monroe-noilp": [[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2, 5], [0, 2, 3, 4],
[0, 2, 3, 5], [0, 2, 4, 5],
[1, 2, 3, 4], [1, 2, 3, 5],
[1, 2, 4, 5]],
"greedy-monroe": [[0, 1, 2, 3]],
"seqslav": [[0, 1, 2, 4]],
"slav-ilp": [[0, 1, 2, 4]],
"slav-noilp": [[0, 1, 2, 4]],
"rule-x": [[0, 1, 2, 4]],
"phragmen-enestroem": [[0, 1, 2, 4]],
}
run_test_instance(self, profile, committeesize, tests3)
def test_monroescore(self):
from preferences import Profile
from score_functions import monroescore_flowbased, monroescore_matching
self.longMessage = True
# and a third profile
profile = Profile(6)
preflist = [[0, 1], [1], [1, 3], [4], [2], [1, 5, 3]]
profile.add_preferences(preflist)
self.assertEqual(monroescore_flowbased(profile, [1, 3, 2]), 5)
self.assertEqual(monroescore_matching(profile, [1, 3, 2]), 5)
self.assertEqual(monroescore_flowbased(profile, [2, 1, 5]), 4)
self.assertEqual(monroescore_matching(profile, [2, 1, 5]), 4)
self.assertEqual(monroescore_flowbased(profile, [2, 4, 5]), 3)
self.assertEqual(monroescore_matching(profile, [2, 5, 4]), 3)
if __name__ == '__main__':
unittest.main()
| [
"rules_approval.compute_rule",
"score_functions.monroescore_matching",
"preferences.Profile",
"rules_approval.compute_monroe",
"rules_approval.MWRULES.keys",
"preferences.DichotomousPreferences",
"score_functions.monroescore_flowbased",
"unittest.main"
] | [((15333, 15348), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15346, 15348), False, 'import unittest\n'), ((301, 374), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {'resolute': '(False)'}), '(rule, profile, committeesize, resolute=False)\n', (328, 374), False, 'import rules_approval\n'), ((599, 671), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {'resolute': '(True)'}), '(rule, profile, committeesize, resolute=True)\n', (626, 671), False, 'import rules_approval\n'), ((1198, 1215), 'preferences.Profile', 'Profile', (['num_cand'], {}), '(num_cand)\n', (1205, 1215), False, 'from preferences import Profile\n'), ((1808, 1841), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[0, 4, 5]'], {}), '([0, 4, 5])\n', (1830, 1841), False, 'from preferences import DichotomousPreferences\n'), ((1855, 1885), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[1, 2]'], {}), '([1, 2])\n', (1877, 1885), False, 'from preferences import DichotomousPreferences\n'), ((2302, 2312), 'preferences.Profile', 'Profile', (['(5)'], {}), '(5)\n', (2309, 2312), False, 'from preferences import Profile\n'), ((2451, 2480), 'rules_approval.MWRULES.keys', 'rules_approval.MWRULES.keys', ([], {}), '()\n', (2478, 2480), False, 'import rules_approval\n'), ((3007, 3017), 'preferences.Profile', 'Profile', (['(3)'], {}), '(3)\n', (3014, 3017), False, 'from preferences import Profile\n'), ((3312, 3341), 'rules_approval.MWRULES.keys', 'rules_approval.MWRULES.keys', ([], {}), '()\n', (3339, 3341), False, 'import rules_approval\n'), ((3836, 3846), 'preferences.Profile', 'Profile', (['(4)'], {}), '(4)\n', (3843, 3846), False, 'from preferences import Profile\n'), ((3948, 3977), 'rules_approval.MWRULES.keys', 'rules_approval.MWRULES.keys', ([], {}), '()\n', (3975, 3977), False, 'import rules_approval\n'), ((4306, 4335), 'rules_approval.MWRULES.keys', 'rules_approval.MWRULES.keys', ([], {}), '()\n', (4333, 4335), False, 'import rules_approval\n'), ((4802, 4812), 'preferences.Profile', 'Profile', (['(4)'], {}), '(4)\n', (4809, 4812), False, 'from preferences import Profile\n'), ((5510, 5520), 'preferences.Profile', 'Profile', (['(6)'], {}), '(6)\n', (5517, 5520), False, 'from preferences import Profile\n'), ((6005, 6015), 'preferences.Profile', 'Profile', (['(6)'], {}), '(6)\n', (6012, 6015), False, 'from preferences import Profile\n'), ((9721, 9731), 'preferences.Profile', 'Profile', (['(6)'], {}), '(6)\n', (9728, 9731), False, 'from preferences import Profile\n'), ((10009, 10019), 'preferences.Profile', 'Profile', (['(5)'], {}), '(5)\n', (10016, 10019), False, 'from preferences import Profile\n'), ((11684, 11694), 'preferences.Profile', 'Profile', (['(6)'], {}), '(6)\n', (11691, 11694), False, 'from preferences import Profile\n'), ((14761, 14771), 'preferences.Profile', 'Profile', (['(6)'], {}), '(6)\n', (14768, 14771), False, 'from preferences import Profile\n'), ((2027, 2065), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[0, 4, 5]', '(2.4)'], {}), '([0, 4, 5], 2.4)\n', (2049, 2065), False, 'from preferences import DichotomousPreferences\n'), ((3050, 3077), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[0]'], {}), '([0])\n', (3072, 3077), False, 'from preferences import DichotomousPreferences\n'), ((3111, 3138), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[0]'], {}), '([0])\n', (3133, 3138), False, 'from preferences 
import DichotomousPreferences\n'), ((3172, 3202), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[1]', '(5)'], {}), '([1], 5)\n', (3194, 3202), False, 'from preferences import DichotomousPreferences\n'), ((3236, 3263), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[0]'], {}), '([0])\n', (3258, 3263), False, 'from preferences import DichotomousPreferences\n'), ((3507, 3564), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {}), '(rule, profile, committeesize)\n', (3534, 3564), False, 'import rules_approval\n'), ((14902, 14943), 'score_functions.monroescore_flowbased', 'monroescore_flowbased', (['profile', '[1, 3, 2]'], {}), '(profile, [1, 3, 2])\n', (14923, 14943), False, 'from score_functions import monroescore_flowbased, monroescore_matching\n'), ((14973, 15013), 'score_functions.monroescore_matching', 'monroescore_matching', (['profile', '[1, 3, 2]'], {}), '(profile, [1, 3, 2])\n', (14993, 15013), False, 'from score_functions import monroescore_flowbased, monroescore_matching\n'), ((15043, 15084), 'score_functions.monroescore_flowbased', 'monroescore_flowbased', (['profile', '[2, 1, 5]'], {}), '(profile, [2, 1, 5])\n', (15064, 15084), False, 'from score_functions import monroescore_flowbased, monroescore_matching\n'), ((15114, 15154), 'score_functions.monroescore_matching', 'monroescore_matching', (['profile', '[2, 1, 5]'], {}), '(profile, [2, 1, 5])\n', (15134, 15154), False, 'from score_functions import monroescore_flowbased, monroescore_matching\n'), ((15184, 15225), 'score_functions.monroescore_flowbased', 'monroescore_flowbased', (['profile', '[2, 4, 5]'], {}), '(profile, [2, 4, 5])\n', (15205, 15225), False, 'from score_functions import monroescore_flowbased, monroescore_matching\n'), ((15255, 15295), 'score_functions.monroescore_matching', 'monroescore_matching', (['profile', '[2, 5, 4]'], {}), '(profile, [2, 5, 4])\n', (15275, 15295), False, 'from score_functions import monroescore_flowbased, monroescore_matching\n'), ((1275, 1308), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[0, 4, 5]'], {}), '([0, 4, 5])\n', (1297, 1308), False, 'from preferences import DichotomousPreferences\n'), ((1405, 1439), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[num_cand]'], {}), '([num_cand])\n', (1427, 1439), False, 'from preferences import DichotomousPreferences\n'), ((1517, 1545), 'preferences.DichotomousPreferences', 'DichotomousPreferences', (['[-1]'], {}), '([-1])\n', (1539, 1545), False, 'from preferences import DichotomousPreferences\n'), ((2545, 2602), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {}), '(rule, profile, committeesize)\n', (2572, 2602), False, 'import rules_approval\n'), ((2666, 2738), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {'resolute': '(True)'}), '(rule, profile, committeesize, resolute=True)\n', (2693, 2738), False, 'import rules_approval\n'), ((5062, 5140), 'rules_approval.compute_monroe', 'rules_approval.compute_monroe', (['profile', 'committeesize'], {'ilp': 'ilp', 'resolute': '(False)'}), '(profile, committeesize, ilp=ilp, resolute=False)\n', (5091, 5140), False, 'import rules_approval\n'), ((5719, 5783), 'rules_approval.compute_rule', 'compute_rule', (['"""optphrag"""', 'profile', 'committeesize'], {'resolute': '(False)'}), "('optphrag', profile, committeesize, resolute=False)\n", (5731, 5783), False, 'from 
rules_approval import compute_rule\n'), ((4110, 4167), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {}), '(rule, profile, committeesize)\n', (4137, 4167), False, 'import rules_approval\n'), ((4370, 4442), 'rules_approval.compute_rule', 'rules_approval.compute_rule', (['rule', 'profile', 'committeesize'], {'resolute': '(True)'}), '(rule, profile, committeesize, resolute=True)\n', (4397, 4442), False, 'import rules_approval\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016, 2017 Red Hat, Inc.
# Red Hat Author: <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Client for applications relying on OpenID Connect for authentication."""
from __future__ import print_function
from copy import copy
import json
import logging
from threading import Lock
import time
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import socket
import os
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from uuid import uuid4 as uuidgen
import webbrowser
from wsgiref import simple_server
import requests
import sys
from openidc_client import release
# The ports that we will try to use for our webserver
WEB_PORTS = [12345, 23456]
class OpenIDCClient(object):
# Internal implementation of tokens:
# Every app id has its own token cache
# The token cache is a json serialized dict
# This dict contains uuid: token pairs
# Every "token" object is a json dict with the following keys:
# idp: The URL of the idp that issued the token
# sub: The subject that owns the token
# access_token: Token value
# token_type: Token type. Currently supported: "Bearer"
# expires_at: Token expiration UTC time. NOTE: Even if the expires_at
# indicates the token should still be valid, it may have been revoked by
# the user! Also, even if it has expired, we might still be able to
# refresh the token.
# refresh_token: The token we can use to refresh the access token
# scopes: A list of scopes that we had requested with the token
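    # Purely illustrative example of a single cache entry (every value below
    # is made up, not a real credential):
    #   "d4f0aeb0c7e94b36b24a0da55b4e4c76": {
    #       "idp": "https://id.example.com/openidc",
    #       "sub": "alice",
    #       "access_token": "<opaque string>",
    #       "token_type": "Bearer",
    #       "expires_at": 1500000000.0,
    #       "refresh_token": "<opaque string>",
    #       "scopes": ["openid", "profile"]}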
def __init__(self, app_identifier, id_provider, id_provider_mapping,
client_id, client_secret=None, use_post=False, useragent=None,
cachedir=None, printfd=sys.stdout):
"""Client for interacting with web services relying on OpenID Connect.
:param app_identifier: Identifier for storage of retrieved tokens
:param id_provider: URL of the identity provider to get tokens from
:param id_provider_mapping: Mapping with URLs to use for specific
endpoints on the IdP.
        :kwarg use_post: Whether to submit the access token via POST form data
            rather than in the Authorization header
:kwarg client_id: The Client Identifier used to request credentials
:kwarg client_secret: The client "secret" that goes with the client_id.
May be None if your IdP does not require you to use a secret.
:kwarg useragent: Useragent string to use. If not provided, defaults to
"python-openidc-client/VERSION"
:kwarg cachedir: The directory in which to store the token caches. Will
            be put through expanduser. Default is ~/.openidc. If this does not
exist and we are unable to create it, the OSError will be thrown.
        :kwarg printfd: The File object to print token instructions to.
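
        Example (all names and URLs below are illustrative, not real)::

            client = OpenIDCClient(
                'myapp',
                'https://id.example.com/openidc',
                {'Authorization': '/Authorization', 'Token': '/Token'},
                'my-client-id')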
"""
self.logger = logging.getLogger(__name__)
self.debug = self.logger.debug
self.app_id = app_identifier
self.use_post = use_post
self.idp = id_provider
self.idp_mapping = id_provider_mapping
self.client_id = client_id
self.client_secret = client_secret
self.useragent = useragent or 'python-openid-client/%s' % \
release.VERSION
self.cachedir = os.path.expanduser(cachedir or '~/.openidc')
self.last_returned_uuid = None
self.problem_reported = False
self.token_to_try = None
self._retrieved_code = None
# TODO: Make cache_lock a filesystem lock so we also lock across
# multiple invocations
self._cache_lock = Lock()
with self._cache_lock:
self.__refresh_cache()
self._valid_cache = []
self._printfd = printfd
def get_token(self, scopes, new_token=True):
"""Function to retrieve tokens with specific scopes.
This function will block until a token is retrieved if requested.
It is always safe to call this though, since if we already have a token
with the current app_identifier that has the required scopes, we will
return it.
This function will return a bearer token or None.
Note that the bearer token might have been revoked by the user or
expired.
In that case, you will want to call report_token_issue() to try to
renew the token or delete the token.
:kwarg scopes: A list of scopes required for the current client.
:kwarg new_token: If True, we will actively request the user to get a
            new token with the current scopeset if we do not already have one.
:rtype: string or None
:returns: String bearer token if possible or None
"""
if not isinstance(scopes, list):
raise ValueError('Scopes must be a list')
token = self._get_token_with_scopes(scopes)
if token:
# If we had a valid token, use that
self.last_returned_uuid = token[0]
self.problem_reported = False
return token[1]['access_token']
elif not new_token:
return None
# We did not have a valid token, now comes the hard part...
uuid = self._get_new_token(scopes)
if uuid:
self.last_returned_uuid = uuid
self.problem_reported = False
return self._cache[uuid]['access_token']
def report_token_issue(self):
"""Report an error with the last token that was returned.
This will attempt to renew the token that was last returned.
If that worked, we will return the new access token.
If it did not work, we will return None and remove this token from the
cache.
If you get an indication from your application that the token you sent
was invalid, you should call it.
You should explicitly NOT call this function if the token was valid but
your request failed due to a server error or because the account or
token was lacking specific permissions.
"""
if not self.last_returned_uuid:
raise Exception('Cannot report issue before requesting token')
if self.problem_reported:
# We were reported an issue before. Let's just remove this token.
self._delete_token(self.last_returned_uuid)
return None
refresh_result = self._refresh_token(self.last_returned_uuid)
if not refresh_result:
self._delete_token(self.last_returned_uuid)
return None
else:
self.problem_reported = True
return self._cache[self.last_returned_uuid]['access_token']
def send_request(self, *args, **kwargs):
"""Make an python-requests POST request.
Allarguments and keyword arguments are like the arguments to requests,
except for `scopes`, `new_token` and `auto_refresh` keyword arguments.
`scopes` is required.
:kwarg scopes: Scopes required for this call. If a token is not present
with this token, a new one will be requested unless nonblocking is
True.
:kwarg new_token: If True, we will actively request the user to get a
            new token with the current scopeset if we do not already have one.
:kwarg auto_refresh: If False, will not try to automatically report
token issues on 401. This helps with broken apps that may send a
401 return code in incorrect cases.
        :kwarg http_method: The HTTP method to use, defaults to POST.
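
        Example (URL and scope are hypothetical)::

            resp = client.send_request(
                'https://api.example.com/v1/items',
                data={'action': 'demo'},
                scopes=['https://id.example.com/scope/api'])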
"""
ckwargs = copy(kwargs)
scopes = ckwargs.pop('scopes')
new_token = ckwargs.pop('new_token', True)
auto_refresh = ckwargs.pop('auto_refresh', True)
method = ckwargs.pop('http_method', 'POST')
is_retry = False
if self.token_to_try:
is_retry = True
token = self.token_to_try
self.token_to_try = None
else:
token = self.get_token(scopes, new_token=new_token)
if not token:
return None
if self.use_post:
if 'json' in ckwargs:
raise ValueError('Cannot provide json in a post call')
if method not in ['POST']:
raise ValueError('Cannot use POST tokens in %s method' %
method)
if 'data' not in ckwargs:
ckwargs['data'] = {}
ckwargs['data']['access_token'] = token
else:
if 'headers' not in ckwargs:
ckwargs['headers'] = {}
ckwargs['headers']['Authorization'] = 'Bearer %s' % token
resp = requests.request(method, *args, **ckwargs)
if resp.status_code == 401 and not is_retry:
if not auto_refresh:
return resp
self.token_to_try = self.report_token_issue()
if not self.token_to_try:
return resp
return self.send_request(*args, **kwargs)
elif resp.status_code == 401:
# We got a 401 and this is a retry. Report error
self.report_token_issue()
return resp
else:
return resp
@property
def _cachefile(self):
"""Property to get the cache file name for the current client.
This assures that whenever this file is touched, the cache lock is held
"""
assert self._cache_lock.locked()
return os.path.join(self.cachedir, 'oidc_%s.json' % self.app_id)
def __refresh_cache(self):
"""Refreshes the self._cache from the cache on disk.
Requires cache_lock to be held by caller."""
assert self._cache_lock.locked()
self.debug('Refreshing cache')
if not os.path.isdir(self.cachedir):
self.debug('Creating directory')
os.makedirs(self.cachedir)
if not os.path.exists(self._cachefile):
self.debug('Creating file')
with open(self._cachefile, 'w') as f:
f.write(json.dumps({}))
with open(self._cachefile, 'r') as f:
self._cache = json.loads(f.read())
self.debug('Loaded %i tokens', len(self._cache))
def _refresh_cache(self):
"""Refreshes the self._cache from the cache on disk.
cache_lock may not be held by anyone."""
with self._cache_lock:
self.__refresh_cache()
def __write_cache(self):
"""Wirtes self._cache to cache on disk.
Requires cache_lock to be held by caller."""
assert self._cache_lock.locked()
self.debug('Writing cache with %i tokens', len(self._cache))
with open(self._cachefile, 'w') as f:
f.write(json.dumps(self._cache))
def _add_token(self, token):
"""Adds a token to the cache and writes cache to disk.
cache_lock may not be held by anyone.
:param token: Dict of the token to be added to the cache
"""
uuid = uuidgen().hex
self.debug('Adding token %s to cache', uuid)
with self._cache_lock:
self.__refresh_cache()
self._cache[uuid] = token
self.__write_cache()
return uuid
def _update_token(self, uuid, toupdate):
"""Updates a token in the cache.
cache_lock may not be held by anyone.
        :param uuid: UUID of the token to be updated
:param toupdate: Dict indicating which fields need to be updated
"""
self.debug('Updating token %s in cache, fields %s',
uuid, toupdate.keys())
with self._cache_lock:
self.__refresh_cache()
if uuid not in self._cache:
return None
self._cache[uuid].update(toupdate)
self.__write_cache()
return uuid
def _delete_token(self, uuid):
"""Removes a token from the cache and writes cache to disk.
cache_lock may not be held by anyone.
:param uuid: UUID of the token to be removed from cache
"""
self.debug('Removing token %s from cache', uuid)
with self._cache_lock:
self.__refresh_cache()
if uuid in self._cache:
self.debug('Removing token')
del self._cache[uuid]
self.__write_cache()
else:
self.debug('Token was already gone')
def _get_token_with_scopes(self, scopes):
"""Searches the cache for any tokens that have the requested scopes.
        It will prefer to return tokens whose expires_at is still in the
        future, but if no such tokens exist it will return a possibly
expired token: it might be refreshable.
:param scopes: List of scopes that need to be in the returned token
:rtype: (string, dict) or None
:returns: Token UUID and contents or None if no applicable tokens were
found
"""
possible_token = None
self.debug('Trying to get token with scopes %s', scopes)
for uuid in self._cache:
self.debug('Checking %s', uuid)
token = self._cache[uuid]
if token['idp'] != self.idp:
self.debug('Incorrect idp')
continue
if not set(scopes).issubset(set(token['scopes'])):
self.debug('Missing scope: %s not subset of %s',
set(scopes),
set(token['scopes']))
continue
            if token['expires_at'] > time.time():
# This is a token that's supposed to still be valid, prefer it
# over any others we have
self.debug('Not yet expired, returning')
return uuid, token
            # This token appears expired, but it might still be refreshable
self.debug('Possible')
possible_token = (uuid, token)
if possible_token:
self.debug('Returning possible token')
return possible_token
def _idp_url(self, method):
"""Returns the IdP URL for the requested method.
:param method: The method name in the IdP mapping dict.
:rtype: string
:returns: The IdP URL
"""
if method in self.idp_mapping:
return self.idp + self.idp_mapping[method]
else:
            raise ValueError('Idp Mapping did not include path for %s'
                             % method)
def _refresh_token(self, uuid):
"""Tries to refresh a token and put the refreshed token in self._cache
The caller is responsible for either removing the token if it could not
        be refreshed or saving the cache if renewal was successful.
:param uuid: The UUID of the cached token to attempt to refresh.
:rtype: bool
        :returns: True if the token was successfully refreshed, False otherwise
"""
oldtoken = self._cache[uuid]
self.debug('Refreshing token %s', uuid)
data = {'client_id': self.client_id,
'grant_type': 'refresh_token',
'refresh_token': oldtoken['refresh_token']}
if self.client_secret:
data['client_secret'] = self.client_secret
resp = requests.request(
'POST',
self._idp_url('Token'),
data=data)
resp.raise_for_status()
resp = resp.json()
if 'error' in resp:
self.debug('Unable to refresh, error: %s', resp['error'])
return False
self._update_token(
uuid,
{'access_token': resp['access_token'],
'token_type': resp['token_type'],
'refresh_token': resp['refresh_token'],
'expires_at': time.time() + resp['expires_in']})
self.debug('Refreshed until %s', self._cache[uuid]['expires_at'])
return True
def _get_server(self, app):
"""This function returns a SimpleServer with an available WEB_PORT."""
for port in WEB_PORTS:
try:
server = simple_server.make_server('0.0.0.0', port, app)
return server
except socket.error:
# This port did not work. Switch to next one
continue
def _get_new_token(self, scopes):
"""This function kicks off some magic.
We will start a new webserver on one of the WEB_PORTS, and then either
show the user a URL, or if possible, kick off their browser.
This URL will be the Authorization endpoint of the IdP with a request
for our client_id to get a new token with the specified scopes.
The webserver will then need to catch the return with either an
Authorization Code (that we will exchange for an access token) or the
cancellation message.
This function will store the new token in the local cache, add it to
the valid cache, and then return the UUID.
If the user cancelled (or we got another error), we will return None.
"""
def _token_app(environ, start_response):
query = environ['QUERY_STRING']
split = query.split('&')
kv = dict([v.split('=', 1) for v in split])
if 'error' in kv:
self.debug('Error code returned: %s (%s)',
kv['error'], kv.get('error_description'))
self._retrieved_code = False
else:
self._retrieved_code = kv['code']
# Just return a message
start_response('200 OK', [('Content-Type', 'text/plain')])
return [u'You can close this window and return to the CLI'.encode('ascii')]
self._retrieved_code = None
server = self._get_server(_token_app)
if not server:
raise Exception('We were unable to instantiate a webserver')
return_uri = 'http://localhost:%i/' % server.socket.getsockname()[1]
rquery = {}
rquery['scope'] = ' '.join(scopes)
rquery['response_type'] = 'code'
rquery['client_id'] = self.client_id
rquery['redirect_uri'] = return_uri
rquery['response_mode'] = 'query'
query = urlencode(rquery)
authz_url = '%s?%s' % (self._idp_url('Authorization'), query)
print('Please visit %s to grant authorization' % authz_url,
file=self._printfd)
webbrowser.open(authz_url)
server.handle_request()
server.server_close()
assert self._retrieved_code is not None
if self._retrieved_code is False:
# The user cancelled the request
self._retrieved_code = None
self.debug('User cancelled')
return None
self.debug('We got an authorization code!')
data = {'client_id': self.client_id,
'grant_type': 'authorization_code',
'redirect_uri': return_uri,
'code': self._retrieved_code}
if self.client_secret:
data['client_secret'] = self.client_secret
resp = requests.request(
'POST',
self._idp_url('Token'),
data=data)
resp.raise_for_status()
self._retrieved_code = None
resp = resp.json()
if 'error' in resp:
self.debug('Error exchanging authorization code: %s',
resp['error'])
return None
token = {'access_token': resp['access_token'],
'refresh_token': resp['refresh_token'],
'expires_at': time.time() + int(resp['expires_in']),
'idp': self.idp,
'token_type': resp['token_type'],
'scopes': scopes}
# AND WE ARE DONE! \o/
return self._add_token(token)
| [
"logging.getLogger",
"os.path.exists",
"os.makedirs",
"threading.Lock",
"json.dumps",
"os.path.join",
"webbrowser.open",
"requests.request",
"uuid.uuid4",
"os.path.isdir",
"urllib.parse.urlencode",
"copy.copy",
"time.time",
"os.path.expanduser",
"wsgiref.simple_server.make_server"
] | [((4037, 4064), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4054, 4064), False, 'import logging\n'), ((4451, 4495), 'os.path.expanduser', 'os.path.expanduser', (["(cachedir or '~/.openidc')"], {}), "(cachedir or '~/.openidc')\n", (4469, 4495), False, 'import os\n'), ((4773, 4779), 'threading.Lock', 'Lock', ([], {}), '()\n', (4777, 4779), False, 'from threading import Lock\n'), ((8738, 8750), 'copy.copy', 'copy', (['kwargs'], {}), '(kwargs)\n', (8742, 8750), False, 'from copy import copy\n'), ((9836, 9878), 'requests.request', 'requests.request', (['method', '*args'], {}), '(method, *args, **ckwargs)\n', (9852, 9878), False, 'import requests\n'), ((10632, 10689), 'os.path.join', 'os.path.join', (['self.cachedir', "('oidc_%s.json' % self.app_id)"], {}), "(self.cachedir, 'oidc_%s.json' % self.app_id)\n", (10644, 10689), False, 'import os\n'), ((19386, 19403), 'urllib.parse.urlencode', 'urlencode', (['rquery'], {}), '(rquery)\n', (19395, 19403), False, 'from urllib.parse import urlencode\n'), ((19584, 19610), 'webbrowser.open', 'webbrowser.open', (['authz_url'], {}), '(authz_url)\n', (19599, 19610), False, 'import webbrowser\n'), ((10932, 10960), 'os.path.isdir', 'os.path.isdir', (['self.cachedir'], {}), '(self.cachedir)\n', (10945, 10960), False, 'import os\n'), ((11019, 11045), 'os.makedirs', 'os.makedirs', (['self.cachedir'], {}), '(self.cachedir)\n', (11030, 11045), False, 'import os\n'), ((11061, 11092), 'os.path.exists', 'os.path.exists', (['self._cachefile'], {}), '(self._cachefile)\n', (11075, 11092), False, 'import os\n'), ((12152, 12161), 'uuid.uuid4', 'uuidgen', ([], {}), '()\n', (12159, 12161), True, 'from uuid import uuid4 as uuidgen\n'), ((11890, 11913), 'json.dumps', 'json.dumps', (['self._cache'], {}), '(self._cache)\n', (11900, 11913), False, 'import json\n'), ((14717, 14728), 'time.time', 'time.time', ([], {}), '()\n', (14726, 14728), False, 'import time\n'), ((17244, 17291), 'wsgiref.simple_server.make_server', 'simple_server.make_server', (['"""0.0.0.0"""', 'port', 'app'], {}), "('0.0.0.0', port, app)\n", (17269, 17291), False, 'from wsgiref import simple_server\n'), ((20747, 20758), 'time.time', 'time.time', ([], {}), '()\n', (20756, 20758), False, 'import time\n'), ((11208, 11222), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (11218, 11222), False, 'import json\n'), ((16930, 16941), 'time.time', 'time.time', ([], {}), '()\n', (16939, 16941), False, 'import time\n')] |
import datetime
import uuid
import simplejson as json
from src.db.s3_client import Client as S3Client
from decimal import Decimal
def get_from_archive(archive_key):
''' Download a VP Save from S3.
:param str archive_key: The vp_save data's location (S3 bucket and file path). This value is required.
'''
if archive_key is None or '/' not in archive_key:
raise ValueError()
bucket, key = archive_key.split('/', 1)
s3_client = S3Client()
try:
archive_object = json.loads(s3_client.get_object(bucket, key)['Body'].read(),parse_float=Decimal)
except Exception as e:
print('ERROR: Error downloading ' + key + ' from ' + bucket + ' bucket. ERROR\n%s' %e)
raise
return archive_object
def build(vp_save={}):
''' Builds and returns a valid vp_save object.
Builds a new vp_save object by creating default values for
required fields and combines any of the given attributes.
'''
vp_save['PK'] = str(uuid.uuid4())
# Set timestamps (for new data)
now = datetime.datetime.now().isoformat()
vp_save['date_created'] = now
vp_save['last_modified'] = now
vp_save['item_type'] = 'vp_save'
return vp_save
def archive(bucket, vp_save_pk, save_data):
''' Archives a vp save data to S3.
Uploads the save data object as a JSON file to S3. The location of the archive
depends on the bucket and the primary key of the save data. If the upload fails,
an exception is raised. If successful, returns the archive location.
:param str bucket: The name of the S3 bucket for the archive. This value is required.
:param str vp_save_pk: The vp_save PK to use as the name of the JSON file. This value is required.
:param obj save_data: The save data object to archive. This value is required.
'''
if bucket is None or len(bucket) <= 0:
raise ValueError()
if vp_save_pk is None or len(vp_save_pk) <= 0:
raise ValueError()
if not save_data:
raise ValueError()
archive_file = __archive_key(save_data) + '/' + vp_save_pk + '.json'
# Upload curation data to S3 archive bucket.
s3_client = S3Client()
try:
s3_client.put_object(
bytes(json.dumps(save_data).encode('UTF-8')),
bucket,
archive_file
)
except Exception as e:
print('ERROR: Error uploading ' + archive_file + ' to ' + bucket + ' bucket. ERROR\n%s' %e)
raise
archive_key_comps = [bucket, archive_file]
return '/'.join(archive_key_comps)
def __archive_key(save_data):
return save_data['PK']
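# Illustrative round trip (the bucket name below is hypothetical):
#
#   save = build({'notes': 'example save'})
#   archive_key = archive('my-vp-save-bucket', save['PK'], save)
#   restored = get_from_archive(archive_key)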
| [
"simplejson.dumps",
"datetime.datetime.now",
"uuid.uuid4",
"src.db.s3_client.Client"
] | [((448, 458), 'src.db.s3_client.Client', 'S3Client', ([], {}), '()\n', (456, 458), True, 'from src.db.s3_client import Client as S3Client\n'), ((2070, 2080), 'src.db.s3_client.Client', 'S3Client', ([], {}), '()\n', (2078, 2080), True, 'from src.db.s3_client import Client as S3Client\n'), ((945, 957), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (955, 957), False, 'import uuid\n'), ((1002, 1025), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1023, 1025), False, 'import datetime\n'), ((2127, 2148), 'simplejson.dumps', 'json.dumps', (['save_data'], {}), '(save_data)\n', (2137, 2148), True, 'import simplejson as json\n')] |
"""
Basic usage
===========
This example presents the basic usage of brokenaxes
"""
import matplotlib.pyplot as plt
from brokenaxes import brokenaxes
import numpy as np
fig = plt.figure(figsize=(5,2))
bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05)
x = np.linspace(0, 1, 100)
bax.plot(x, np.sin(10 * x), label='sin')
bax.plot(x, np.cos(10 * x), label='cos')
bax.legend(loc=3)
bax.set_xlabel('time')
bax.set_ylabel('value')
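# When running this as a plain (non-interactive) script, you would typically
# finish with plt.show(), or persist the figure with fig.savefig(...) instead.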
| [
"matplotlib.pyplot.figure",
"brokenaxes.brokenaxes",
"numpy.linspace",
"numpy.cos",
"numpy.sin"
] | [((180, 206), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 2)'}), '(figsize=(5, 2))\n', (190, 206), True, 'import matplotlib.pyplot as plt\n'), ((212, 299), 'brokenaxes.brokenaxes', 'brokenaxes', ([], {'xlims': '((0, 0.1), (0.4, 0.7))', 'ylims': '((-1, 0.7), (0.79, 1))', 'hspace': '(0.05)'}), '(xlims=((0, 0.1), (0.4, 0.7)), ylims=((-1, 0.7), (0.79, 1)),\n hspace=0.05)\n', (222, 299), False, 'from brokenaxes import brokenaxes\n'), ((294, 316), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (305, 316), True, 'import numpy as np\n'), ((329, 343), 'numpy.sin', 'np.sin', (['(10 * x)'], {}), '(10 * x)\n', (335, 343), True, 'import numpy as np\n'), ((370, 384), 'numpy.cos', 'np.cos', (['(10 * x)'], {}), '(10 * x)\n', (376, 384), True, 'import numpy as np\n')] |
import os
import json
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
all = Graph()
with open("data/dict.json") as f:
ln_map = json.load(f)
st_path = "../data/index.json"
with open(st_path) as f:
result = json.load(f)
uris = []
for obj in result:
fields = ["spatial", "agential"]
for field in fields:
values = obj[field]
for value in values:
uri = "chname:"+value
if field == "spatial":
uri = "place:"+value
if uri not in uris:
uris.append(uri)
for uri in uris:
print(uri)
tmp = uri.split(":")
prefix = tmp[0]
suffix = tmp[1]
ln = suffix
ln_org = ""
if ln in ln_map:
ln_org = ln
ln = ln_map[ln]
if len(ln) > 20:
continue
# ln = obj["uri"].split(":")[1]
'''
wiki_path = "data/wikidata/"+ln+".json"
wiki = {}
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
# sameAs
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(wiki_url))
all.add(stmt)
obj = wiki["entities"][wiki_url.split("/")[-1]]
# description
if "descriptions" in obj and "ja" in obj["descriptions"]:
stmt = (subject, URIRef("http://schema.org/description"), Literal(obj["descriptions"]["ja"]["value"], lang="ja"))
all.add(stmt)
# label
if "labels" in obj and "ja" in obj["labels"]:
stmt = (subject, RDFS.label, Literal(obj["labels"]["ja"]["value"]))
all.add(stmt)
ln = wiki_url.split("/")[-1]
'''
db_path = "data/dbpedia_ja/"+ln+".json"
wiki_path = "data/wikidata/"+ln+".json"
db = {}
wiki = {}
if os.path.exists(db_path):
with open(db_path) as f:
db = json.load(f)
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
db_uri = "http://ja.dbpedia.org/resource/"+ln
if db_uri not in db:
print("not" , db_uri)
continue
# ######
subject = URIRef("https://shibusawa-dlab.github.io/lab1/api/"+prefix+"/"+ln)
if prefix == "chname":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Agent"))
all.add(stmt)
elif prefix == "time":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Time"))
all.add(stmt)
elif prefix == "place":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Place"))
all.add(stmt)
elif prefix == "event":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Event"))
all.add(stmt)
elif prefix == "org":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Organization"))
all.add(stmt)
elif prefix == "keyword":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Keyword"))
all.add(stmt)
elif prefix == "type":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Type"))
all.add(stmt)
# ######
obj = db[db_uri]
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(db_uri))
all.add(stmt)
if "http://dbpedia.org/ontology/thumbnail" in obj:
stmt = (subject, URIRef("http://schema.org/image"), URIRef(obj["http://dbpedia.org/ontology/thumbnail"][0]["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#label" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#label"]
for label in labels:
if label["lang"] == "ja":
stmt = (subject, RDFS.label, Literal(label["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#comment" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#comment"]
for label in labels:
stmt = (subject, URIRef("http://schema.org/description"), Literal(label["value"], lang=label["lang"]))
all.add(stmt)
if "http://www.w3.org/2002/07/owl#sameAs" in obj:
labels = obj["http://www.w3.org/2002/07/owl#sameAs"]
for label in labels:
value = label["value"]
if "http://dbpedia.org" in value or "http://ja.dbpedia.org" in value or "www.wikidata.org" in value:
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(value))
all.add(stmt)
    # Location information (geo coordinates)
'''
if "point" in obj and prefix == "place":
value = obj["point"]["value"].split(" ")
        # addGeo function
geoUri = addGeo({
"lat" : float(value[0]),
"long": float(value[1])
})
stmt = (subject, URIRef("http://schema.org/geo"), geoUri)
if suffix not in places:
places[suffix] = {
"lat" : float(value[0]),
"long": float(value[1])
}
all.add(stmt)
'''
    # Name before normalization (original form)
if ln_org != "" and ln != ln_org:
stmt = (subject, URIRef("http://schema.org/name"), Literal(ln_org))
all.add(stmt)
path = "data/all.json"
all.serialize(destination=path, format='json-ld')
all.serialize(destination=path.replace(".json", ".rdf"), format='pretty-xml') | [
"os.path.exists",
"rdflib.Literal",
"rdflib.Graph",
"json.load",
"rdflib.URIRef"
] | [((295, 302), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (300, 302), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((351, 363), 'json.load', 'json.load', (['f'], {}), '(f)\n', (360, 363), False, 'import json\n'), ((435, 447), 'json.load', 'json.load', (['f'], {}), '(f)\n', (444, 447), False, 'import json\n'), ((2146, 2169), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (2160, 2169), False, 'import os\n'), ((2254, 2279), 'os.path.exists', 'os.path.exists', (['wiki_path'], {}), '(wiki_path)\n', (2268, 2279), False, 'import os\n'), ((2540, 2612), 'rdflib.URIRef', 'URIRef', (["('https://shibusawa-dlab.github.io/lab1/api/' + prefix + '/' + ln)"], {}), "('https://shibusawa-dlab.github.io/lab1/api/' + prefix + '/' + ln)\n", (2546, 2612), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3710, 3756), 'rdflib.URIRef', 'URIRef', (['"""http://www.w3.org/2002/07/owl#sameAs"""'], {}), "('http://www.w3.org/2002/07/owl#sameAs')\n", (3716, 3756), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3758, 3772), 'rdflib.URIRef', 'URIRef', (['db_uri'], {}), '(db_uri)\n', (3764, 3772), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((2229, 2241), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2238, 2241), False, 'import json\n'), ((2343, 2355), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2352, 2355), False, 'import json\n'), ((2678, 2726), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Agent"""'], {}), "('https://jpsearch.go.jp/term/type/Agent')\n", (2684, 2726), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3885, 3918), 'rdflib.URIRef', 'URIRef', (['"""http://schema.org/image"""'], {}), "('http://schema.org/image')\n", (3891, 3918), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3920, 3984), 'rdflib.URIRef', 'URIRef', (["obj['http://dbpedia.org/ontology/thumbnail'][0]['value']"], {}), "(obj['http://dbpedia.org/ontology/thumbnail'][0]['value'])\n", (3926, 3984), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((5756, 5788), 'rdflib.URIRef', 'URIRef', (['"""http://schema.org/name"""'], {}), "('http://schema.org/name')\n", (5762, 5788), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((5790, 5805), 'rdflib.Literal', 'Literal', (['ln_org'], {}), '(ln_org)\n', (5797, 5805), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((2824, 2871), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Time"""'], {}), "('https://jpsearch.go.jp/term/type/Time')\n", (2830, 2871), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((4537, 4576), 'rdflib.URIRef', 'URIRef', (['"""http://schema.org/description"""'], {}), "('http://schema.org/description')\n", (4543, 4576), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((4578, 4621), 'rdflib.Literal', 'Literal', (["label['value']"], {'lang': "label['lang']"}), "(label['value'], lang=label['lang'])\n", (4585, 4621), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((2970, 3018), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Place"""'], {}), "('https://jpsearch.go.jp/term/type/Place')\n", (2976, 3018), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((4272, 4295), 'rdflib.Literal', 'Literal', (["label['value']"], {}), "(label['value'])\n", (4279, 4295), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((5003, 5049), 'rdflib.URIRef', 'URIRef', (['"""http://www.w3.org/2002/07/owl#sameAs"""'], {}), 
"('http://www.w3.org/2002/07/owl#sameAs')\n", (5009, 5049), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((5051, 5064), 'rdflib.URIRef', 'URIRef', (['value'], {}), '(value)\n', (5057, 5064), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3117, 3165), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Event"""'], {}), "('https://jpsearch.go.jp/term/type/Event')\n", (3123, 3165), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3262, 3317), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Organization"""'], {}), "('https://jpsearch.go.jp/term/type/Organization')\n", (3268, 3317), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3418, 3468), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Keyword"""'], {}), "('https://jpsearch.go.jp/term/type/Keyword')\n", (3424, 3468), False, 'from rdflib import URIRef, BNode, Literal, Graph\n'), ((3566, 3613), 'rdflib.URIRef', 'URIRef', (['"""https://jpsearch.go.jp/term/type/Type"""'], {}), "('https://jpsearch.go.jp/term/type/Type')\n", (3572, 3613), False, 'from rdflib import URIRef, BNode, Literal, Graph\n')] |
import unittest
try:
from unittest.mock import *
except ImportError:
from mock import *
from msgpack import *
import bootloader_read_config
from commands import *
import sys
import json
class ReadConfigToolTestCase(unittest.TestCase):
@patch('utils.write_command_retry')
@patch('utils.write_command')
@patch('utils.open_connection')
@patch('builtins.print')
def test_integration(self, print_mock, open_conn, write_command,
write_command_retry):
sys.argv = "test.py -p /dev/ttyUSB0 0 1 2".split()
configs = [{'id': i} for i in range(3)]
write_command_retry.return_value = {
i: packb(configs[i]) for i in range(3)
}
open_conn.return_value = object()
bootloader_read_config.main()
write_command_retry.assert_any_call(open_conn.return_value,
encode_read_config(), [0, 1, 2])
all_configs = {i: configs[i] for i in range(3)}
print_mock.assert_any_call(json.dumps(all_configs, indent=4,
sort_keys=True))
@patch('utils.open_connection')
@patch('utils.write_command_retry')
@patch('utils.write_command')
@patch('utils.read_can_datagrams')
@patch('builtins.print')
def test_network_discovery(self, print_mock, read_can_datagram,
write_command, write_command_retry, open_conn):
"""
Checks if we can perform a whole network discovery.
"""
sys.argv = "test.py -p /dev/ttyUSB0 --all".split()
        # The first two boards answer the ping
board_answers = [(b'', [0], i) for i in range(1, 3)] + [None]
read_can_datagram.return_value = iter(board_answers)
write_command_retry.return_value = {
i: packb({'id': i}) for i in range(1, 3)
}
bootloader_read_config.main()
write_command.assert_any_call(open_conn.return_value,
encode_ping(),
list(range(1, 128)))
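# Hedged addition (not part of the original test module): the suite can be run directly
# with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()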
| [
"bootloader_read_config.main",
"json.dumps"
] | [((769, 798), 'bootloader_read_config.main', 'bootloader_read_config.main', ([], {}), '()\n', (796, 798), False, 'import bootloader_read_config\n'), ((1902, 1931), 'bootloader_read_config.main', 'bootloader_read_config.main', ([], {}), '()\n', (1929, 1931), False, 'import bootloader_read_config\n'), ((1038, 1087), 'json.dumps', 'json.dumps', (['all_configs'], {'indent': '(4)', 'sort_keys': '(True)'}), '(all_configs, indent=4, sort_keys=True)\n', (1048, 1087), False, 'import json\n')] |
import logging
from asyncio import sleep
import discord
from discord.ext import commands
from config import SETTINGS
from crew import crew_embed
from diary import diary_embed
from film import film_embed
from helpers import LetterboxdError
from list_ import list_embed
from review import review_embed
from user import user_embed
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s | %(message)s',
datefmt='%m/%d %H:%M:%S')
bot = commands.Bot(command_prefix='!', case_insensitive=True)
bot.remove_command('help')
@bot.event
async def on_ready():
logging.info(
'Logged in %d servers as %s' % (len(bot.guilds), bot.user.name))
bot.loop.create_task(update_stats())
@bot.event
async def on_message(message):
if message.content.startswith('!'):
message.content = message.content.replace('’', '').replace('‘', '')
await bot.process_commands(message)
async def update_stats():
while True:
await bot.change_presence(
activity=discord.Game('!helplb - {} servers'.format(
len(bot.guilds))))
await sleep(900)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send('This command requires a parameter.')
elif isinstance(error, commands.BotMissingPermissions):
await ctx.send('This command requires the {} permission.'.format(
', '.join(err for err in error.missing_perms)))
elif isinstance(error, (commands.CommandNotFound, commands.CheckFailure)):
return
elif isinstance(error, commands.CommandInvokeError):
if isinstance(error.original, discord.HTTPException):
return
else:
await ctx.send('Sorry, the command crashed. :/')
logging.error(ctx.message.content)
raise error
async def send_msg(ctx, msg):
if isinstance(msg, discord.Embed):
await ctx.send(embed=msg)
else:
await ctx.send(msg)
# Commands
@bot.command()
async def helplb(ctx):
help_embed = discord.Embed(colour=discord.Color.from_rgb(54, 57, 62))
help_embed.set_thumbnail(url='https://i.imgur.com/Kr1diFu.png')
help_embed.set_author(
name='Letterboxd Bot', icon_url='https://i.imgur.com/5VALKVy.jpg')
help_embed.set_footer(
text='Created by Porkepik#2664',
icon_url='https://i.imgur.com/li4cLpd.png')
for key, value in SETTINGS['help'].items():
help_embed.add_field(name=key, value=value, inline=False)
help_embed.description = 'Invite Bot | '\
+ '[GitHub](https://github.com/Porkepik/Letterboxd-Bot)'
await ctx.send(embed=help_embed)
@bot.command()
async def user(ctx, username):
try:
msg = await user_embed(username)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command()
async def diary(ctx, username):
try:
msg = await diary_embed(username)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(aliases=['actor', 'actress', 'director'])
async def crew(ctx, *, arg):
try:
msg = await crew_embed(arg, ctx.invoked_with)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(aliases=['movie'])
async def film(ctx, *, arg):
try:
# eiga.me ratings for specific servers
if ctx.guild and ctx.guild.id in SETTINGS['mkdb_servers']:
msg = await film_embed(arg, True)
else:
msg = await film_embed(arg)
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
async def check_if_two_args(ctx):
msg = ctx.message.content.split()
if len(msg) < 3:
await ctx.send('This command requires 2 parameters.')
return len(msg) > 2
@bot.command(name='list')
@commands.check(check_if_two_args)
async def list_(ctx, username, *args):
try:
msg = await list_embed(username, ' '.join(str(i) for i in args))
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(aliases=['entry'])
@commands.check(check_if_two_args)
async def review(ctx, username, *args):
try:
msg = await review_embed(username, ' '.join(str(i) for i in args))
except LetterboxdError as err:
msg = err
await send_msg(ctx, msg)
@bot.command(name='del')
@commands.bot_has_permissions(manage_messages=True)
async def delete(ctx):
await ctx.message.delete()
found_bot_msg = False
found_usr_cmd = False
cmd_list = list()
for command in bot.commands:
cmd_list.append('!' + command.name)
for alias in command.aliases:
cmd_list.append('!' + alias)
async for log_message in ctx.channel.history(limit=30):
if log_message.author.id == bot.user.id and not found_bot_msg:
bot_message = log_message
found_bot_msg = True
elif found_bot_msg:
if log_message.content:
first_word = log_message.content.split()[0]
else:
continue
if first_word in cmd_list:
found_usr_cmd = True
cmd_message = log_message
break
if found_usr_cmd:
if not ctx.author.permissions_in(ctx.channel).manage_messages:
if not cmd_message.author.id == ctx.author.id:
return
await cmd_message.delete()
await bot_message.delete()
bot.run(SETTINGS['discord'])
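# Hedged sketch (an assumption, not taken from the original config module): judging from
# the keys used above, SETTINGS is expected to provide at least the entries below; every
# value here is a placeholder.
EXAMPLE_SETTINGS = {
    'discord': 'your-bot-token',  # token passed to bot.run()
    'help': {'!user <username>': 'Shows a Letterboxd profile'},  # command -> description
    'mkdb_servers': [123456789012345678],  # guild ids that also get eiga.me ratings
}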
| [
"logging.basicConfig",
"discord.ext.commands.Bot",
"user.user_embed",
"discord.ext.commands.bot_has_permissions",
"film.film_embed",
"discord.ext.commands.check",
"crew.crew_embed",
"discord.Color.from_rgb",
"asyncio.sleep",
"diary.diary_embed",
"logging.error"
] | [((330, 435), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s | %(message)s"""', 'datefmt': '"""%m/%d %H:%M:%S"""'}), "(level=logging.INFO, format='%(asctime)s | %(message)s',\n datefmt='%m/%d %H:%M:%S')\n", (349, 435), False, 'import logging\n'), ((452, 507), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""!"""', 'case_insensitive': '(True)'}), "(command_prefix='!', case_insensitive=True)\n", (464, 507), False, 'from discord.ext import commands\n'), ((3837, 3870), 'discord.ext.commands.check', 'commands.check', (['check_if_two_args'], {}), '(check_if_two_args)\n', (3851, 3870), False, 'from discord.ext import commands\n'), ((4109, 4142), 'discord.ext.commands.check', 'commands.check', (['check_if_two_args'], {}), '(check_if_two_args)\n', (4123, 4142), False, 'from discord.ext import commands\n'), ((4377, 4427), 'discord.ext.commands.bot_has_permissions', 'commands.bot_has_permissions', ([], {'manage_messages': '(True)'}), '(manage_messages=True)\n', (4405, 4427), False, 'from discord.ext import commands\n'), ((1099, 1109), 'asyncio.sleep', 'sleep', (['(900)'], {}), '(900)\n', (1104, 1109), False, 'from asyncio import sleep\n'), ((2078, 2112), 'discord.Color.from_rgb', 'discord.Color.from_rgb', (['(54)', '(57)', '(62)'], {}), '(54, 57, 62)\n', (2100, 2112), False, 'import discord\n'), ((2743, 2763), 'user.user_embed', 'user_embed', (['username'], {}), '(username)\n', (2753, 2763), False, 'from user import user_embed\n'), ((2924, 2945), 'diary.diary_embed', 'diary_embed', (['username'], {}), '(username)\n', (2935, 2945), False, 'from diary import diary_embed\n'), ((3143, 3176), 'crew.crew_embed', 'crew_embed', (['arg', 'ctx.invoked_with'], {}), '(arg, ctx.invoked_with)\n', (3153, 3176), False, 'from crew import crew_embed\n'), ((3469, 3490), 'film.film_embed', 'film_embed', (['arg', '(True)'], {}), '(arg, True)\n', (3479, 3490), False, 'from film import film_embed\n'), ((3529, 3544), 'film.film_embed', 'film_embed', (['arg'], {}), '(arg)\n', (3539, 3544), False, 'from film import film_embed\n'), ((1789, 1823), 'logging.error', 'logging.error', (['ctx.message.content'], {}), '(ctx.message.content)\n', (1802, 1823), False, 'import logging\n')] |
localhost = "http://localhost/" # your local host
database = "mysql://root@localhost/vaticChecker" # server://user:pass@localhost/dbname
min_training = 2 # the minimum number of training videos to be considered
recaptcha_secret = "" # recaptcha secret for verification
duplicate_annotations = False # Should the server allow for duplicate annotations?
import os.path
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# TODO: remove on server
import os
os.environ['PYTHON_EGG_CACHE'] = '/tmp/apache'
| [
"os.path.abspath"
] | [((431, 456), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (446, 456), False, 'import os\n')] |
"""Timezone helper functions.
This module uses pytz when it's available and falls back to local implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If is it a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
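# Minimal usage sketch (added for illustration, not part of Django): the context manager
# temporarily switches the active time zone and restores the previous one on exit. The
# string zone name below assumes pytz is installed.
def _override_example():
    with override("America/Chicago"):
        return get_current_timezone_name()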
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
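# Illustrative round trip (a sketch added for clarity, not part of Django): a naive
# datetime can be promoted to an aware one and converted back with the helpers above.
def _aware_naive_round_trip(naive_dt):
    tz = get_default_timezone()
    aware = make_aware(naive_dt, tz)
    return make_naive(aware, tz)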
| [
"datetime.tzinfo.__init__",
"time.localtime",
"pytz.timezone",
"threading.local",
"datetime.datetime.utcnow",
"time.mktime",
"datetime.datetime.now",
"datetime.timedelta"
] | [((506, 518), 'datetime.timedelta', 'timedelta', (['(0)'], {}), '(0)\n', (515, 518), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((3077, 3084), 'threading.local', 'local', ([], {}), '()\n', (3082, 3084), False, 'from threading import local\n'), ((1259, 1293), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-_time.timezone)'}), '(seconds=-_time.timezone)\n', (1268, 1293), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((1505, 1526), 'datetime.tzinfo.__init__', 'tzinfo.__init__', (['self'], {}), '(self)\n', (1520, 1526), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((2082, 2098), 'time.mktime', '_time.mktime', (['tt'], {}), '(tt)\n', (2094, 2098), True, 'import time as _time\n'), ((2112, 2134), 'time.localtime', '_time.localtime', (['stamp'], {}), '(stamp)\n', (2127, 2134), True, 'import time as _time\n'), ((6842, 6856), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6854, 6856), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((1350, 1383), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-_time.altzone)'}), '(seconds=-_time.altzone)\n', (1359, 1383), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((2734, 2767), 'pytz.timezone', 'pytz.timezone', (['settings.TIME_ZONE'], {}), '(settings.TIME_ZONE)\n', (2747, 2767), False, 'import pytz\n'), ((3656, 3678), 'datetime.datetime.now', 'datetime.now', (['timezone'], {}), '(timezone)\n', (3668, 3678), False, 'from datetime import datetime, timedelta, tzinfo\n'), ((4245, 4268), 'pytz.timezone', 'pytz.timezone', (['timezone'], {}), '(timezone)\n', (4258, 4268), False, 'import pytz\n'), ((6779, 6796), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6794, 6796), False, 'from datetime import datetime, timedelta, tzinfo\n')] |
#!/usr/bin/env python3
import kfp.dsl as dsl
import kfp.gcp as gcp
# Pipeline input variables.
KUBECTL_IMAGE = "gcr.io/mcas-195423/trackml_master_kfp_kubectl"
KUBECTL_IMAGE_VERSION = "1"
TRACKML_IMAGE = "gcr.io/mcas-195423/trackml_master_trackml"
TRACKML_IMAGE_VERSION = "1"
def train_op():
return dsl.ContainerOp(
name='train',
image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION),
command=["python"],
arguments=["train.py"],
    ).apply(gcp.use_gcp_secret())  # .set_gpu_limit(1)
def serve_op():
return dsl.ContainerOp(
name='serve',
image="{}:{}".format(KUBECTL_IMAGE, KUBECTL_IMAGE_VERSION),
arguments=[
"/src/set_kubectl.sh",
"--namespace", "kubeflow",
"--command", "apply -f /src/k8s/serve.yaml",
]
).apply(gcp.use_gcp_secret())
def resultsgen_op():
return dsl.ContainerOp(
name='resultsgen',
image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION),
command=["python"],
arguments=["resultsgen.py"],
).apply(gcp.use_gcp_secret())
@dsl.pipeline(
name='trackml',
description='A pipeline that predicts particle tracks'
)
def trackml():
train = train_op()
serve = serve_op()
serve.after(train)
resultsgen = resultsgen_op()
resultsgen.after(serve)
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(trackml, __file__ + '.tar.gz')
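    # Hedged follow-up (an assumption, not in the original file): the compiled archive can
    # be uploaded through the Kubeflow Pipelines UI, or submitted programmatically, e.g.:
    #   client = kfp.Client(host='<pipelines-endpoint>')  # endpoint is a placeholder
    #   client.create_run_from_pipeline_func(trackml, arguments={})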
| [
"kfp.gcp.use_gcp_secret",
"kfp.dsl.pipeline",
"kfp.compiler.Compiler"
] | [((1025, 1114), 'kfp.dsl.pipeline', 'dsl.pipeline', ([], {'name': '"""trackml"""', 'description': '"""A pipeline that predicts particle tracks"""'}), "(name='trackml', description=\n 'A pipeline that predicts particle tracks')\n", (1037, 1114), True, 'import kfp.dsl as dsl\n'), ((464, 484), 'kfp.gcp.use_gcp_secret', 'gcp.use_gcp_secret', ([], {}), '()\n', (482, 484), True, 'import kfp.gcp as gcp\n'), ((777, 797), 'kfp.gcp.use_gcp_secret', 'gcp.use_gcp_secret', ([], {}), '()\n', (795, 797), True, 'import kfp.gcp as gcp\n'), ((1001, 1021), 'kfp.gcp.use_gcp_secret', 'gcp.use_gcp_secret', ([], {}), '()\n', (1019, 1021), True, 'import kfp.gcp as gcp\n'), ((1317, 1336), 'kfp.compiler.Compiler', 'compiler.Compiler', ([], {}), '()\n', (1334, 1336), True, 'import kfp.compiler as compiler\n')] |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import torch
from torch import nn
from .modeling_auto import AutoModel, AutoModelWithLMHead
logger = logging.getLogger(__name__)
class PreTrainedEncoderDecoder(nn.Module):
r"""
:class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be
instantiated as a transformer architecture with one of the base model
classes of the library as encoder and (optionally) another one as
decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
"""
def __init__(self, encoder, decoder):
super(PreTrainedEncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
@classmethod
def from_pretrained(
cls,
encoder_pretrained_model_name_or_path=None,
decoder_pretrained_model_name_or_path=None,
*model_args,
**kwargs
):
r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints.
        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
To train the model, you need to first set it back in training mode with `model.train()`
Params:
encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                    - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                    - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g. ``output_attention=True``). These behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
                You can specify kwargs specific to the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both the encoder and the decoder.
Examples::
            model = PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
"""
# keyword arguments come in 3 flavors: encoder-specific (prefixed by
# `encoder_`), decoder-specific (prefixed by `decoder_`) and those
# that apply to the model as a whole.
# We let the specific kwargs override the common ones in case of conflict.
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_")
and not argument.startswith("decoder_")
}
kwargs_decoder = kwargs_common.copy()
kwargs_encoder = kwargs_common.copy()
kwargs_encoder.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
kwargs_decoder.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
encoder = AutoModel.from_pretrained(
encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
)
encoder.config.is_decoder = False
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
decoder = AutoModelWithLMHead.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder
)
decoder.config.is_decoder = True
model = cls(encoder, decoder)
return model
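    # Hedged illustration (not taken from the library documentation): prefixed keyword
    # arguments are routed to the corresponding sub-model, e.g.
    #   model = PreTrainedEncoderDecoder.from_pretrained(
    #       'bert-base-uncased', 'bert-base-uncased',
    #       decoder_output_attention=True)  # kwarg name mirrors the docstring example above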
def save_pretrained(self, save_directory):
""" Save a Seq2Seq model and its configuration file in a format such
that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained`
        We save the encoder's and decoder's parameters in two separate directories.
"""
self.encoder.save_pretrained(os.path.join(save_directory, "encoder"))
self.decoder.save_pretrained(os.path.join(save_directory, "decoder"))
    def forward(self, encoder_input_ids, decoder_input_ids, **kwargs):
        """ The forward pass on a seq2seq model depends on what we are performing:
- During training we perform one forward pass through both the encoder
and decoder;
- During prediction, we perform one forward pass through the encoder,
and then perform several forward passes with the encoder's hidden
state through the decoder to decode a full sequence.
Therefore, we skip the forward pass on the encoder if an argument named
`encoder_hidden_state` is passed to this function.
Params:
encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of encoder input sequence tokens in the vocabulary.
decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of decoder input sequence tokens in the vocabulary.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
"""
# keyword arguments come in 3 flavors: encoder-specific (prefixed by
# `encoder_`), decoder-specific (prefixed by `decoder_`) and those
        # that apply to the model as a whole.
# We let the specific kwargs override the common ones in case of conflict.
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_")
and not argument.startswith("decoder_")
}
kwargs_decoder = kwargs_common.copy()
kwargs_encoder = kwargs_common.copy()
kwargs_encoder.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
kwargs_decoder.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
# Encode if needed (training, first prediction pass)
encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
if encoder_hidden_states is None:
encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
encoder_hidden_states = encoder_outputs[
0
] # output the last layer hidden state
else:
encoder_outputs = ()
# Decode
kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
kwargs_decoder["encoder_attention_mask"] = kwargs_encoder.get(
"attention_mask", None
)
decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder)
return decoder_outputs + encoder_outputs
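    # Hedged usage sketch (added for illustration, not part of the library): because the
    # encoder pass is skipped whenever `encoder_hidden_states` is supplied, a caller can
    # encode once and reuse the hidden states across decoding steps, e.g.
    #   hidden = model.encoder(encoder_input_ids)[0]
    #   step_outputs = model(encoder_input_ids, decoder_input_ids,
    #                        encoder_hidden_states=hidden)
    # The variable names above are placeholders.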
class Model2Model(PreTrainedEncoderDecoder):
r"""
:class:`~transformers.Model2Model` instantiates a Seq2Seq2 model
where both of the encoder and decoder are of the same family. If the
    name of or the path to a pretrained model is specified, the encoder and
    the decoder will be initialized with the pretrained weights (the
    cross-attention will be initialized randomly if its weights are not
present).
It is possible to override this behavior and initialize, say, the decoder randomly
by creating it beforehand as follows
config = BertConfig.from_pretrained()
decoder = BertForMaskedLM(config)
model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder)
"""
def __init__(self, *args, **kwargs):
super(Model2Model, self).__init__(*args, **kwargs)
self.tie_weights()
    def tie_weights(self):
        """ Tying the encoder's and decoder's embeddings together.
            We need to get down to each model's embedding weights. However the
            different model classes are inconsistent in that respect:
- BertModel: embeddings.word_embeddings
- RoBERTa: embeddings.word_embeddings
- XLMModel: embeddings
- GPT2: wte
- BertForMaskedLM: bert.embeddings.word_embeddings
- RobertaForMaskedLM: roberta.embeddings.word_embeddings
argument of the XEmbedding layer for each model, but it is "blocked"
by a model-specific keyword (bert, )...
"""
# self._tie_or_clone_weights(self.encoder, self.decoder)
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
if (
"bert" not in pretrained_model_name_or_path
or "roberta" in pretrained_model_name_or_path
or "distilbert" in pretrained_model_name_or_path
):
raise ValueError("Only the Bert model is currently supported.")
model = super(Model2Model, cls).from_pretrained(
encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
*args,
**kwargs
)
return model
class Model2LSTM(PreTrainedEncoderDecoder):
@classmethod
def from_pretrained(cls, *args, **kwargs):
if kwargs.get("decoder_model", None) is None:
            # We will create a randomly initialized LSTM model as decoder
if "decoder_config" not in kwargs:
raise ValueError(
"To load an LSTM in Encoder-Decoder model, please supply either: "
" - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or"
" - a dictionary of configuration parameters that will be used to initialize a"
" torch.nn.LSTM model as `decoder_config` keyword argument. "
" E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`"
)
kwargs["decoder_model"] = torch.nn.LSTM(kwargs.pop("decoder_config"))
model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs)
return model
| [
"logging.getLogger",
"os.path.join"
] | [((902, 929), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (919, 929), False, 'import logging\n'), ((9721, 9760), 'os.path.join', 'os.path.join', (['save_directory', '"""encoder"""'], {}), "(save_directory, 'encoder')\n", (9733, 9760), False, 'import os\n'), ((9800, 9839), 'os.path.join', 'os.path.join', (['save_directory', '"""decoder"""'], {}), "(save_directory, 'decoder')\n", (9812, 9839), False, 'import os\n')] |
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import datetime, timedelta
import pandas as pd
import random
# Default args definition
default_args = {
'owner': 'Rafael',
'depends_on_past': False,
'start_date': datetime(2020, 11, 29, 18, 20),
'email': ['<EMAIL>', '<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
    'retry_delay': timedelta(minutes=1)
}
# Dag definition
dag = DAG(
'treino-03',
description="Extrai dados do Titanic e calcula idade media para homens ou mulheres",
default_args = default_args,
schedule_interval='*/20 * * * *'
)
get_data = BashOperator(
task_id='get-data',
bash_command='curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv',
dag=dag
)
def sorteia_h_m():
return random.choice(['male', 'female'])
escolhe_h_m = PythonOperator(
task_id='escolhe-h-m',
python_callable=sorteia_h_m,
dag=dag
)
def MouF(**context):
value=context['task_instance'].xcom_pull(task_ids='escolhe-h-m')
if value == 'male':
return 'branch_homem'
else:
return 'branch_mulher'
male_female = BranchPythonOperator(
task_id='condicional',
python_callable=MouF,
provide_context=True,
dag=dag
)
def mean_homem():
df = pd.read_csv('/usr/local/airflow/data/train.csv')
med = df.loc[df.Sex == 'male'].Age.mean()
print(f'Media de idade dos homens no Titanic: {med}')
branch_homem = PythonOperator(
task_id='branch_homem',
python_callable=mean_homem,
dag=dag
)
def mean_mulher():
df = pd.read_csv('/usr/local/airflow/data/train.csv')
med = df.loc[df.Sex == 'female'].Age.mean()
print(f'Media de idade das mulheres no Titanic: {med}')
branch_mulher = PythonOperator(
task_id='branch_mulher',
python_callable=mean_mulher,
dag=dag
)
get_data >> escolhe_h_m >> male_female >> [branch_homem, branch_mulher]
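# Hedged local check (an added sketch, not part of the original DAG): the branching
# callable can be exercised without a scheduler by faking the XCom lookup; the names
# below exist only in this sketch.
if __name__ == '__main__':
    class _FakeTaskInstance:
        def xcom_pull(self, task_ids):
            return 'male'
    assert MouF(task_instance=_FakeTaskInstance()) == 'branch_homem'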
| [
"datetime.datetime",
"random.choice",
"airflow.operators.python_operator.PythonOperator",
"pandas.read_csv",
"airflow.operators.bash_operator.BashOperator",
"airflow.DAG",
"datetime.timedelta",
"airflow.operators.python_operator.BranchPythonOperator"
] | [((567, 738), 'airflow.DAG', 'DAG', (['"""treino-03"""'], {'description': '"""Extrai dados do Titanic e calcula idade media para homens ou mulheres"""', 'default_args': 'default_args', 'schedule_interval': '"""*/20 * * * *"""'}), "('treino-03', description=\n 'Extrai dados do Titanic e calcula idade media para homens ou mulheres',\n default_args=default_args, schedule_interval='*/20 * * * *')\n", (570, 738), False, 'from airflow import DAG\n'), ((762, 957), 'airflow.operators.bash_operator.BashOperator', 'BashOperator', ([], {'task_id': '"""get-data"""', 'bash_command': '"""curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv"""', 'dag': 'dag'}), "(task_id='get-data', bash_command=\n 'curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv'\n , dag=dag)\n", (774, 957), False, 'from airflow.operators.bash_operator import BashOperator\n'), ((1042, 1117), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""escolhe-h-m"""', 'python_callable': 'sorteia_h_m', 'dag': 'dag'}), "(task_id='escolhe-h-m', python_callable=sorteia_h_m, dag=dag)\n", (1056, 1117), False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((1333, 1433), 'airflow.operators.python_operator.BranchPythonOperator', 'BranchPythonOperator', ([], {'task_id': '"""condicional"""', 'python_callable': 'MouF', 'provide_context': '(True)', 'dag': 'dag'}), "(task_id='condicional', python_callable=MouF,\n provide_context=True, dag=dag)\n", (1353, 1433), False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((1645, 1720), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""branch_homem"""', 'python_callable': 'mean_homem', 'dag': 'dag'}), "(task_id='branch_homem', python_callable=mean_homem, dag=dag)\n", (1659, 1720), False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((1938, 2015), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""branch_mulher"""', 'python_callable': 'mean_mulher', 'dag': 'dag'}), "(task_id='branch_mulher', python_callable=mean_mulher, dag=dag)\n", (1952, 2015), False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((354, 384), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(29)', '(18)', '(20)'], {}), '(2020, 11, 29, 18, 20)\n', (362, 384), False, 'from datetime import datetime, timedelta\n'), ((520, 540), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (529, 540), False, 'from datetime import datetime, timedelta\n'), ((993, 1026), 'random.choice', 'random.choice', (["['male', 'female']"], {}), "(['male', 'female'])\n", (1006, 1026), False, 'import random\n'), ((1476, 1524), 'pandas.read_csv', 'pd.read_csv', (['"""/usr/local/airflow/data/train.csv"""'], {}), "('/usr/local/airflow/data/train.csv')\n", (1487, 1524), True, 'import pandas as pd\n'), ((1764, 1812), 'pandas.read_csv', 'pd.read_csv', (['"""/usr/local/airflow/data/train.csv"""'], {}), "('/usr/local/airflow/data/train.csv')\n", (1775, 1812), True, 'import pandas as pd\n')] |
# This is a reusable webcrawler architecture that can be adapted to scrape any website.
# RESULTS:
# Roughly 24 seconds per thousand courses scraped for ThreadPoolExecutor vs 63s for unthreaded script.
# This is a very basic implementation of multithreading in order to show the proof of concept, but is a good base to build off of.
import requests
from bs4 import BeautifulSoup
import csv
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import time
import logging
from mitopencourseware_crawler_worker import mit_crawler
def courses_spider(max_pages):
data_to_csv = [] #holds all data to send to csv
print("Webcrawler workers have started, please wait while we finish crawling...")
    # remove max pages loop (unnecessary)
page = 1
while page <= max_pages:
url = 'https://ocw.mit.edu/courses/'
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
# Multithread only the work:
# Tuning is required to find the most efficient amount of workers in the thread pool.
with ThreadPoolExecutor(max_workers=30) as executor:
start = time.time()
futures = [ executor.submit(work, link) for link in soup.findAll('h4', {'class': 'course_title'}, limit=100) ]
data_to_csv = []
for result in as_completed(futures):
data_to_csv.append(result.result())
end = time.time()
print("Time Taken to complete: {:.6f}s".format(end-start))
print("Courses extracted: ", len(data_to_csv))
page += 1
export_to_csv(data_to_csv)
def work(link):
    # replace this function with the specific crawler you want to use:
return mit_crawler(link)
# Exports data to a formatted csv file; this will be replaced with multithreaded API calls to the Cassandra Prisma Database,
# or, in production on the cloud, the data will be sent to the temporary S3 database to be picked up by the AWS Lambda function which will push it to the Cassandra Database
def export_to_csv(csv_data):
with open('web_crawl_data.csv',mode='w') as csv_file:
field_names = ['Title','URL extension','External Website Logo','URL(href)','Description','Course logo URL']
        csv_writer = csv.DictWriter(csv_file, fieldnames=field_names)  # delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL
csv_writer.writeheader()
for course in csv_data:
course_data = {
'Title':course[0],
'URL extension':course[1],
'External Website Logo':course[2],
'URL(href)':course[3],
'Description':course[4],
'Course logo URL':course[5],
}
csv_writer.writerow(course_data)
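# Hedged usage sketch (not in the original script): crawl a single page of course
# listings and write the results to web_crawl_data.csv.
if __name__ == '__main__':
    courses_spider(max_pages=1)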
| [
"csv.DictWriter",
"concurrent.futures.ThreadPoolExecutor",
"mitopencourseware_crawler_worker.mit_crawler",
"requests.get",
"bs4.BeautifulSoup",
"concurrent.futures.as_completed",
"time.time"
] | [((1782, 1799), 'mitopencourseware_crawler_worker.mit_crawler', 'mit_crawler', (['link'], {}), '(link)\n', (1793, 1799), False, 'from mitopencourseware_crawler_worker import mit_crawler\n'), ((885, 902), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (897, 902), False, 'import requests\n'), ((956, 996), 'bs4.BeautifulSoup', 'BeautifulSoup', (['plain_text', '"""html.parser"""'], {}), "(plain_text, 'html.parser')\n", (969, 996), False, 'from bs4 import BeautifulSoup\n'), ((2316, 2364), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': 'field_names'}), '(csv_file, fieldnames=field_names)\n', (2330, 2364), False, 'import csv\n'), ((1141, 1175), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(30)'}), '(max_workers=30)\n', (1159, 1175), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor\n'), ((1209, 1220), 'time.time', 'time.time', ([], {}), '()\n', (1218, 1220), False, 'import time\n'), ((1399, 1420), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (1411, 1420), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor\n'), ((1492, 1503), 'time.time', 'time.time', ([], {}), '()\n', (1501, 1503), False, 'import time\n')] |
from typing import List, Optional, NewType, Tuple, NamedTuple, Type
import attr
from jinja2 import Template as JinjaTemplate, StrictUndefined
from genyrator.entities.Entity import Entity
from genyrator.path import create_relative_path
OutPath = NewType('OutPath', Tuple[List[str], str])
Import = NamedTuple('Import',
[('module_name', str),
('imports', List[str]), ])
@attr.s
class Template(object):
template_name: str = attr.ib()
template_file_name: str = attr.ib()
template_file_path: List[str] = attr.ib()
relative_path: List[str] = attr.ib()
out_path: Optional[OutPath] = attr.ib()
def create_template(self):
path = create_relative_path(
[*self.template_file_path, self.template_file_name]
)
with open(path) as f:
template = JinjaTemplate(f.read(), undefined=StrictUndefined)
return template
def render(self):
return self.create_template().render(template=self)
def create_template(
constructor,
template_path: Optional[List[str]] = None,
out_path: Optional[OutPath] = None,
**kwargs,
) -> Template:
relative_path = template_path[0:-1]
path = ['genyrator', 'templates'] + relative_path
template_name = template_path[-1]
return constructor(
template_name=template_name,
template_file_name='{}.j2'.format(template_name),
template_file_path=path,
out_path=out_path,
relative_path=relative_path,
**kwargs,
)
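def _example_template():
    # Hedged usage sketch (an assumption, not from the original module): builds, but does
    # not render, a ConvertDict template; the template path and module name are purely
    # illustrative and assume a convert_dict.j2 file under genyrator/templates.
    return create_template(ConvertDict, template_path=['convert_dict'], module_name='my_app')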
@attr.s
class RootInit(Template):
db_import_path: str = attr.ib()
module_name: str = attr.ib()
@attr.s
class RootSchema(Template):
module_name: str = attr.ib()
entities: List[Entity] = attr.ib()
@attr.s
class ConvertDict(Template):
module_name: str = attr.ib()
@attr.s
class SQLAlchemyModel(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
entity: Entity = attr.ib()
@attr.s
class ModelToDict(Template):
module_name: str = attr.ib()
@attr.s
class Config(Template):
module_name: str = attr.ib()
@attr.s
class SQLAlchemyModelInit(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
imports: List[Import] = attr.ib()
@attr.s
class RestplusModel(Template):
entity: Entity = attr.ib()
@attr.s
class Resource(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
entity: Entity = attr.ib()
restplus_template: str = attr.ib()
TypeOption: Type = attr.ib()
@attr.s
class ResourcesInit(Template):
entities: List[Entity] = attr.ib()
module_name: str = attr.ib()
api_name: str = attr.ib()
api_description: str = attr.ib()
@attr.s
class DomainModel(Template):
entity: Entity = attr.ib()
module_name: str = attr.ib()
def sqlalchemy_model_imports(self):
return list(set([
rel.target_entity_class_name
for rel in self.entity.relationships
]))
@attr.s
class ConvertProperties(Template):
module_name: str = attr.ib()
@attr.s
class ConvertModels(Template):
module_name: str = attr.ib()
@attr.s
class JoinEntities(Template):
module_name: str = attr.ib()
@attr.s
class ConvertDictToMarshmallow(Template):
module_name: str = attr.ib()
db_import_path: str = attr.ib()
@attr.s
class Fixture(Template):
db_import_path: str = attr.ib()
module_name: str = attr.ib()
entity: Entity = attr.ib()
| [
"genyrator.path.create_relative_path",
"typing.NamedTuple",
"typing.NewType",
"attr.ib"
] | [((247, 288), 'typing.NewType', 'NewType', (['"""OutPath"""', 'Tuple[List[str], str]'], {}), "('OutPath', Tuple[List[str], str])\n", (254, 288), False, 'from typing import List, Optional, NewType, Tuple, NamedTuple, Type\n'), ((298, 366), 'typing.NamedTuple', 'NamedTuple', (['"""Import"""', "[('module_name', str), ('imports', List[str])]"], {}), "('Import', [('module_name', str), ('imports', List[str])])\n", (308, 366), False, 'from typing import List, Optional, NewType, Tuple, NamedTuple, Type\n'), ((492, 501), 'attr.ib', 'attr.ib', ([], {}), '()\n', (499, 501), False, 'import attr\n'), ((546, 555), 'attr.ib', 'attr.ib', ([], {}), '()\n', (553, 555), False, 'import attr\n'), ((600, 609), 'attr.ib', 'attr.ib', ([], {}), '()\n', (607, 609), False, 'import attr\n'), ((654, 663), 'attr.ib', 'attr.ib', ([], {}), '()\n', (661, 663), False, 'import attr\n'), ((708, 717), 'attr.ib', 'attr.ib', ([], {}), '()\n', (715, 717), False, 'import attr\n'), ((1683, 1692), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1690, 1692), False, 'import attr\n'), ((1719, 1728), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1726, 1728), False, 'import attr\n'), ((1799, 1808), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1806, 1808), False, 'import attr\n'), ((1841, 1850), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1848, 1850), False, 'import attr\n'), ((1913, 1922), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1920, 1922), False, 'import attr\n'), ((1992, 2001), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1999, 2001), False, 'import attr\n'), ((2028, 2037), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2035, 2037), False, 'import attr\n'), ((2064, 2073), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2071, 2073), False, 'import attr\n'), ((2136, 2145), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2143, 2145), False, 'import attr\n'), ((2203, 2212), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2210, 2212), False, 'import attr\n'), ((2295, 2304), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2302, 2304), False, 'import attr\n'), ((2340, 2349), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2347, 2349), False, 'import attr\n'), ((2385, 2394), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2392, 2394), False, 'import attr\n'), ((2457, 2466), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2464, 2466), False, 'import attr\n'), ((2535, 2544), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2542, 2544), False, 'import attr\n'), ((2577, 2586), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2584, 2586), False, 'import attr\n'), ((2619, 2628), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2626, 2628), False, 'import attr\n'), ((2661, 2670), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2668, 2670), False, 'import attr\n'), ((2703, 2712), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2710, 2712), False, 'import attr\n'), ((2790, 2799), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2797, 2799), False, 'import attr\n'), ((2836, 2845), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2843, 2845), False, 'import attr\n'), ((2882, 2891), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2889, 2891), False, 'import attr\n'), ((2928, 2937), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2935, 2937), False, 'import attr\n'), ((3003, 3012), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3010, 3012), False, 'import attr\n'), ((3039, 3048), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3046, 3048), False, 'import attr\n'), ((3286, 3295), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3293, 3295), False, 'import attr\n'), ((3360, 3369), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3367, 3369), False, 'import attr\n'), ((3433, 3442), 'attr.ib', 'attr.ib', ([], {}), '()\n', 
(3440, 3442), False, 'import attr\n'), ((3521, 3530), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3528, 3530), False, 'import attr\n'), ((3557, 3566), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3564, 3566), False, 'import attr\n'), ((3628, 3637), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3635, 3637), False, 'import attr\n'), ((3664, 3673), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3671, 3673), False, 'import attr\n'), ((3700, 3709), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3707, 3709), False, 'import attr\n'), ((765, 838), 'genyrator.path.create_relative_path', 'create_relative_path', (['[*self.template_file_path, self.template_file_name]'], {}), '([*self.template_file_path, self.template_file_name])\n', (785, 838), False, 'from genyrator.path import create_relative_path\n')] |
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\mode_collector.py
# Compiled at: 2018-11-30 15:48:11
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listenable_property, listens, EventObject
class ModeCollector(EventObject):
def __init__(self, main_modes=None, mix_modes=None, global_mix_modes=None, device_modes=None, *a, **k):
super(ModeCollector, self).__init__(*a, **k)
self._main_modes = main_modes
self._mix_modes = mix_modes
self._global_mix_modes = global_mix_modes
self._device_modes = device_modes
self._on_selected_main_mode_changed.subject = main_modes
self._on_selected_mix_mode_changed.subject = mix_modes
self._on_selected_global_mix_mode_changed.subject = global_mix_modes
self._on_selected_device_mode_changed.subject = device_modes
@listenable_property
def main_mode(self):
return self._main_modes.selected_mode
@listens(b'selected_mode')
def _on_selected_main_mode_changed(self, mode):
self.notify_main_mode()
@listenable_property
def mix_mode(self):
return self._mix_modes.selected_mode
@listens(b'selected_mode')
def _on_selected_mix_mode_changed(self, mode):
self.notify_mix_mode()
@listenable_property
def global_mix_mode(self):
return self._global_mix_modes.selected_mode
@listens(b'selected_mode')
def _on_selected_global_mix_mode_changed(self, mode):
self.notify_global_mix_mode()
@listenable_property
def device_mode(self):
return self._device_modes.selected_mode
@listens(b'selected_mode')
def _on_selected_device_mode_changed(self, mode):
self.notify_device_mode() | [
"ableton.v2.base.listens"
] | [((1193, 1218), 'ableton.v2.base.listens', 'listens', (["b'selected_mode'"], {}), "(b'selected_mode')\n", (1200, 1218), False, 'from ableton.v2.base import listenable_property, listens, EventObject\n'), ((1404, 1429), 'ableton.v2.base.listens', 'listens', (["b'selected_mode'"], {}), "(b'selected_mode')\n", (1411, 1429), False, 'from ableton.v2.base import listenable_property, listens, EventObject\n'), ((1627, 1652), 'ableton.v2.base.listens', 'listens', (["b'selected_mode'"], {}), "(b'selected_mode')\n", (1634, 1652), False, 'from ableton.v2.base import listenable_property, listens, EventObject\n'), ((1856, 1881), 'ableton.v2.base.listens', 'listens', (["b'selected_mode'"], {}), "(b'selected_mode')\n", (1863, 1881), False, 'from ableton.v2.base import listenable_property, listens, EventObject\n')] |
from pytest import raises
from discopy.cartesian import *
def test_Box_repr():
f = Box('f', 1, 2, lambda x: (x, x))
assert "Box('f', 1, 2" in repr(f)
def test_Function_str():
f = Function(2, 1, lambda x, y: x + y)
assert 'Function(dom=2, cod=1,' in str(f)
def test_Function_call():
f = Swap(2, 1)
values = (2, 3)
with raises(TypeError) as err:
f(*values)
assert str(err.value) == messages.expected_input_length(f, values)
def test_Function_then():
f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
assert Function.id(2).then(*(f, g))(20, 21) == 42
def test_Function_then_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f >> g
assert str(err.value) == messages.type_err(Function, g)
g = Function.id(2)
with raises(AxiomError) as err:
f >> g
assert str(err.value) == messages.does_not_compose(f, g)
def test_Function_tensor():
assert Function.id(3)(1, 2, 3)\
== Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)
def test_Function_tensor_err():
f = Function(2, 1, lambda x, y: x + y)
g = (lambda x: x, )
with raises(TypeError) as err:
f @ g
assert str(err.value) == messages.type_err(Function, g)
| [
"pytest.raises"
] | [((352, 369), 'pytest.raises', 'raises', (['TypeError'], {}), '(TypeError)\n', (358, 369), False, 'from pytest import raises\n'), ((737, 754), 'pytest.raises', 'raises', (['TypeError'], {}), '(TypeError)\n', (743, 754), False, 'from pytest import raises\n'), ((870, 888), 'pytest.raises', 'raises', (['AxiomError'], {}), '(AxiomError)\n', (876, 888), False, 'from pytest import raises\n'), ((1216, 1233), 'pytest.raises', 'raises', (['TypeError'], {}), '(TypeError)\n', (1222, 1233), False, 'from pytest import raises\n')] |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np
#from ... import config
#if not config.is_building_docs():
# from qiskit_metal import is_true
class TransmonInterdigitated(QComponent):
"""
    The base "TransmonInterdigitated" class inherits from the "QComponent" class.
This creates a transmon pocket with two large pads connected by a Josephson
junction. Both pads have four interdigitated "fingers" which increase the
capacitance of the structure. There are three coupling capacitor pads with qpins
defined; these can be connected to other structures in a design using CPWs.
Default Options:
    * pad_width: '1000um' -- width of the large rectangular pads on either side
of the junction
    * pad_height: '300um' -- height of the large rectangular pads on either side
of the junction
* finger_width: '50um' -- width of the "finger" on either side of the junction
* finger_height: '100um' -- height of the "finger" on the side of the junction
* finger_space: '50um' -- height of the Josephson Junction (equivalently; space
between two fingers)
* pad_pos_x: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* pad_pos_y: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* comb_width: '50um' -- the width of the four interdigitated combs connected to
either pad
* comb_space_vert: '50um' -- the space between the edge of a comb and the edge of
the opposite rectangular pad
* comb_space_hor: '50um' -- the space between adjacent interdigitated comb structures
* jj_width: '20um' -- the width of the Josephson Junction located between the two
fingers of the device
* cc_space: '50um' -- the space between the lower rectangular pad and the coupling
capacitor below it
* cc_width: '100um' -- the width of the coupling capacitor located below the bottom
rectangular pad
* cc_height: '100um' -- the height of the coupling capacitor located below the bottom
rectangular pad
* cc_topleft_space: '50um' -- the space between the upper rectangular pad and the top
left coupling capacitor
* cc_topleft_width: '100um' -- the width of the top left coupling capacitor pad
* cc_topleft_height: '100um' -- the height of the top left coupling capacitor pad
* cc_topright_space: '50um' -- the space between the upper rectangular pad and the
top right coupling capacitor
* cc_topright_width: '100um' -- the width of the top right coupling capacitor pad
* cc_topright_height: '100um' -- the height of the top right coupling capacitor pad
* position_x: '0um' -- the x-coordinate defining the center of the transmon pocket
on the chip
* position_y: '0um' -- the y-coordinate defining the center of the transmon pocket
on the chip
* rotation: '0.0' -- the angle at which the entire structure is rotated
* rotation_top_pad: '180' -- internal coordinate defining the angle of rotation
between top and bottom pads
    * layer: '1' -- all objects are drawn assuming they are part of the same layer on
        the chip
"""
# Default drawing options
default_options = Dict(pad_width='1000um',
pad_height='300um',
finger_width='50um',
finger_height='100um',
finger_space='50um',
pad_pos_x='0um',
pad_pos_y='0um',
comb_width='50um',
comb_space_vert='50um',
comb_space_hor='50um',
jj_width='20um',
cc_space='50um',
cc_width='100um',
cc_height='100um',
cc_topleft_space='50um',
cc_topleft_width='100um',
cc_topleft_height='100um',
cc_topright_space='50um',
cc_topright_width='100um',
cc_topright_height='100um',
position_x='0um',
position_y='0um',
rotation='0.0',
rotation_top_pad='180',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# draw the lower pad as a rectangle
pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x,
p.pad_pos_y)
# draw the lower finger as a rectangle
finger_lower = draw.rectangle(
p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y +
0.49999 * (p.pad_height) + 0.49999 * (p.finger_height))
# draw the Josephson Junction
rect_jj = draw.rectangle(
p.jj_width, p.finger_space, p.pad_pos_x,
0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space))
# draw the first comb to the right of the lower finger as a rectangle
comb1_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the right of the lower finger by translating the first comb
comb2_lower = draw.translate(comb1_lower,
2.0 * (p.comb_space_hor + p.comb_width),
0.0)
# draw the first comb to the left of the lower finger
comb3_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(-0.5 * p.finger_width - 2.0 * p.comb_space_hor -
1.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the left of the lower finger
comb4_lower = draw.translate(comb3_lower,
-2.0 * (p.comb_space_hor + p.comb_width),
0.0)
coupling_capacitor = draw.rectangle(
p.cc_width, p.cc_height, p.pad_pos_x,
p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height)
cc_topleft = draw.rectangle(
p.cc_topleft_width, p.cc_topleft_height,
p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height)
cc_topright = draw.translate(
cc_topleft,
p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width,
0.0)
# merge the bottom elements
bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower,
comb3_lower, comb4_lower)
# create the top portion of the comb by translating and rotating
# the bottom portion of the comb
top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space)
top = draw.rotate(top, p.rotation_top_pad)
# merge everything into a single design
design = draw.union(bottom, top, rect_jj, coupling_capacitor,
cc_topleft, cc_topright)
# draw the transmon pocket bounding box
pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height)
# the origin is originally set to the middle of the lower pad.
# Let's move it to the center of the JJ.
design = draw.translate(
design, 0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# now translate the final structure according to the user input
design = draw.rotate(design, p.rotation, origin=(0, 0))
design = draw.translate(design, p.position_x, p.position_y)
pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
pocket = draw.translate(pocket, p.position_x, p.position_y)
geom = {'design': design}
geom_pocket = {'pocket': pocket}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)
###################################################################
# Add Qpin connections for coupling capacitors
# define a function that both rotates and translates the
# qpin coordinates
def qpin_rotate_translate(x):
""" This function rotates the coordinates of the three qpins
according to the user inputs for "position_x", "position_y"
and "rotation".
"""
y = list(x)
z = [0.0, 0.0]
z[0] = y[0] * cos(p.rotation * 3.14159 / 180) - y[1] * sin(
p.rotation * 3.14159 / 180)
z[1] = y[0] * sin(p.rotation * 3.14159 / 180) + y[1] * cos(
p.rotation * 3.14159 / 180)
z[0] = z[0] + p.position_x
z[1] = z[1] + p.position_y
x = (z[0], z[1])
return x
# Add Qpin connections for the bottom coupling capacitor
qp1a = (0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# rotate and translate the qpin coordinates
qp1a = qpin_rotate_translate(qp1a)
qp1b = qpin_rotate_translate(qp1b)
self.add_pin('pin1',
points=np.array([qp1a, qp1b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top left coupling capacitor
qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp2b = (p.pad_pos_x - 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp2a = qpin_rotate_translate(qp2a)
qp2b = qpin_rotate_translate(qp2b)
self.add_pin('pin2',
points=np.array([qp2a, qp2b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top right coupling capacitor
qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp3b = (p.pad_pos_x + 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp3a = qpin_rotate_translate(qp3a)
qp3b = qpin_rotate_translate(qp3b)
self.add_pin('pin3',
points=np.array([qp3a, qp3b]),
width=0.01,
input_as_norm=True)
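# Usage sketch (not part of the original component; the design entry points below are
# the usual qiskit-metal workflow and are assumptions, not something this module
# defines). Any key from `default_options` can be overridden at instantiation time:
#
#     from qiskit_metal import designs
#     design = designs.DesignPlanar()
#     q1 = TransmonInterdigitated(design, 'Q1',
#                                 options=Dict(position_x='1mm', rotation='90'))
#     design.rebuild()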
| [
"qiskit_metal.draw.union",
"math.cos",
"qiskit_metal.Dict",
"numpy.array",
"qiskit_metal.draw.rectangle",
"qiskit_metal.draw.translate",
"qiskit_metal.draw.rotate",
"math.sin"
] | [((4010, 4566), 'qiskit_metal.Dict', 'Dict', ([], {'pad_width': '"""1000um"""', 'pad_height': '"""300um"""', 'finger_width': '"""50um"""', 'finger_height': '"""100um"""', 'finger_space': '"""50um"""', 'pad_pos_x': '"""0um"""', 'pad_pos_y': '"""0um"""', 'comb_width': '"""50um"""', 'comb_space_vert': '"""50um"""', 'comb_space_hor': '"""50um"""', 'jj_width': '"""20um"""', 'cc_space': '"""50um"""', 'cc_width': '"""100um"""', 'cc_height': '"""100um"""', 'cc_topleft_space': '"""50um"""', 'cc_topleft_width': '"""100um"""', 'cc_topleft_height': '"""100um"""', 'cc_topright_space': '"""50um"""', 'cc_topright_width': '"""100um"""', 'cc_topright_height': '"""100um"""', 'position_x': '"""0um"""', 'position_y': '"""0um"""', 'rotation': '"""0.0"""', 'rotation_top_pad': '"""180"""', 'layer': '"""1"""'}), "(pad_width='1000um', pad_height='300um', finger_width='50um',\n finger_height='100um', finger_space='50um', pad_pos_x='0um', pad_pos_y=\n '0um', comb_width='50um', comb_space_vert='50um', comb_space_hor='50um',\n jj_width='20um', cc_space='50um', cc_width='100um', cc_height='100um',\n cc_topleft_space='50um', cc_topleft_width='100um', cc_topleft_height=\n '100um', cc_topright_space='50um', cc_topright_width='100um',\n cc_topright_height='100um', position_x='0um', position_y='0um',\n rotation='0.0', rotation_top_pad='180', layer='1')\n", (4014, 4566), False, 'from qiskit_metal import draw, Dict\n'), ((5306, 5334), 'qiskit_metal.Dict', 'Dict', ([], {'short_name': '"""component"""'}), "(short_name='component')\n", (5310, 5334), False, 'from qiskit_metal import draw, Dict\n'), ((5576, 5643), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.pad_width', 'p.pad_height', 'p.pad_pos_x', 'p.pad_pos_y'], {}), '(p.pad_width, p.pad_height, p.pad_pos_x, p.pad_pos_y)\n', (5590, 5643), False, 'from qiskit_metal import draw, Dict\n'), ((5750, 5881), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.finger_width', 'p.finger_height', 'p.pad_pos_x', '(p.pad_pos_y + 0.49999 * p.pad_height + 0.49999 * p.finger_height)'], {}), '(p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y + \n 0.49999 * p.pad_height + 0.49999 * p.finger_height)\n', (5764, 5881), False, 'from qiskit_metal import draw, Dict\n'), ((5963, 6083), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.jj_width', 'p.finger_space', 'p.pad_pos_x', '(0.5 * p.pad_height + p.finger_height + 0.5 * p.finger_space)'], {}), '(p.jj_width, p.finger_space, p.pad_pos_x, 0.5 * p.pad_height +\n p.finger_height + 0.5 * p.finger_space)\n', (5977, 6083), False, 'from qiskit_metal import draw, Dict\n'), ((6210, 6460), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.comb_width', '(2 * p.finger_height + p.finger_space - p.comb_space_vert)', '(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width)', '(0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.pad_height + 0.5 * p.\n finger_height))'], {}), '(p.comb_width, 2 * p.finger_height + p.finger_space - p.\n comb_space_vert, 0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.\n comb_width, 0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.\n pad_height + 0.5 * p.finger_height))\n', (6224, 6460), False, 'from qiskit_metal import draw, Dict\n'), ((6635, 6708), 'qiskit_metal.draw.translate', 'draw.translate', (['comb1_lower', '(2.0 * (p.comb_space_hor + p.comb_width))', '(0.0)'], {}), '(comb1_lower, 2.0 * (p.comb_space_hor + p.comb_width), 0.0)\n', (6649, 6708), False, 'from qiskit_metal import draw, Dict\n'), ((6868, 7124), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.comb_width', '(2 * 
p.finger_height + p.finger_space - p.comb_space_vert)', '(-0.5 * p.finger_width - 2.0 * p.comb_space_hor - 1.5 * p.comb_width)', '(0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.pad_height + 0.5 * p.\n finger_height))'], {}), '(p.comb_width, 2 * p.finger_height + p.finger_space - p.\n comb_space_vert, -0.5 * p.finger_width - 2.0 * p.comb_space_hor - 1.5 *\n p.comb_width, 0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.\n pad_height + 0.5 * p.finger_height))\n', (6882, 7124), False, 'from qiskit_metal import draw, Dict\n'), ((7282, 7356), 'qiskit_metal.draw.translate', 'draw.translate', (['comb3_lower', '(-2.0 * (p.comb_space_hor + p.comb_width))', '(0.0)'], {}), '(comb3_lower, -2.0 * (p.comb_space_hor + p.comb_width), 0.0)\n', (7296, 7356), False, 'from qiskit_metal import draw, Dict\n'), ((7461, 7585), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.cc_width', 'p.cc_height', 'p.pad_pos_x', '(p.pad_pos_y - 0.5 * p.pad_height - p.cc_space - 0.5 * p.cc_height)'], {}), '(p.cc_width, p.cc_height, p.pad_pos_x, p.pad_pos_y - 0.5 * p.\n pad_height - p.cc_space - 0.5 * p.cc_height)\n', (7475, 7585), False, 'from qiskit_metal import draw, Dict\n'), ((7630, 7883), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.cc_topleft_width', 'p.cc_topleft_height', '(p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width)', '(p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +\n p.cc_topleft_space + 0.5 * p.cc_topleft_height)'], {}), '(p.cc_topleft_width, p.cc_topleft_height, p.pad_pos_x - 0.5 *\n p.pad_width + 0.5 * p.cc_topleft_width, p.pad_pos_y + 1.5 * p.\n pad_height + 2.0 * p.finger_height + p.finger_space + p.\n cc_topleft_space + 0.5 * p.cc_topleft_height)\n', (7644, 7883), False, 'from qiskit_metal import draw, Dict\n'), ((7942, 8046), 'qiskit_metal.draw.translate', 'draw.translate', (['cc_topleft', '(p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width)', '(0.0)'], {}), '(cc_topleft, p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p\n .cc_topright_width, 0.0)\n', (7956, 8046), False, 'from qiskit_metal import draw, Dict\n'), ((8133, 8224), 'qiskit_metal.draw.union', 'draw.union', (['pad_lower', 'finger_lower', 'comb1_lower', 'comb2_lower', 'comb3_lower', 'comb4_lower'], {}), '(pad_lower, finger_lower, comb1_lower, comb2_lower, comb3_lower,\n comb4_lower)\n', (8143, 8224), False, 'from qiskit_metal import draw, Dict\n'), ((8378, 8436), 'qiskit_metal.draw.translate', 'draw.translate', (['bottom', '(0.0)', '(p.pad_height + p.finger_space)'], {}), '(bottom, 0.0, p.pad_height + p.finger_space)\n', (8392, 8436), False, 'from qiskit_metal import draw, Dict\n'), ((8451, 8487), 'qiskit_metal.draw.rotate', 'draw.rotate', (['top', 'p.rotation_top_pad'], {}), '(top, p.rotation_top_pad)\n', (8462, 8487), False, 'from qiskit_metal import draw, Dict\n'), ((8554, 8631), 'qiskit_metal.draw.union', 'draw.union', (['bottom', 'top', 'rect_jj', 'coupling_capacitor', 'cc_topleft', 'cc_topright'], {}), '(bottom, top, rect_jj, coupling_capacitor, cc_topleft, cc_topright)\n', (8564, 8631), False, 'from qiskit_metal import draw, Dict\n'), ((8726, 8779), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['(1.5 * p.pad_width)', '(5.0 * p.pad_height)'], {}), '(1.5 * p.pad_width, 5.0 * p.pad_height)\n', (8740, 8779), False, 'from qiskit_metal import draw, Dict\n'), ((8918, 9012), 'qiskit_metal.draw.translate', 'draw.translate', (['design', '(0.0)', '(-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)'], {}), '(design, 0.0, -0.5 * p.pad_height - p.finger_height - 
0.5 * p\n .finger_space)\n', (8932, 9012), False, 'from qiskit_metal import draw, Dict\n'), ((9123, 9169), 'qiskit_metal.draw.rotate', 'draw.rotate', (['design', 'p.rotation'], {'origin': '(0, 0)'}), '(design, p.rotation, origin=(0, 0))\n', (9134, 9169), False, 'from qiskit_metal import draw, Dict\n'), ((9187, 9237), 'qiskit_metal.draw.translate', 'draw.translate', (['design', 'p.position_x', 'p.position_y'], {}), '(design, p.position_x, p.position_y)\n', (9201, 9237), False, 'from qiskit_metal import draw, Dict\n'), ((9256, 9302), 'qiskit_metal.draw.rotate', 'draw.rotate', (['pocket', 'p.rotation'], {'origin': '(0, 0)'}), '(pocket, p.rotation, origin=(0, 0))\n', (9267, 9302), False, 'from qiskit_metal import draw, Dict\n'), ((9320, 9370), 'qiskit_metal.draw.translate', 'draw.translate', (['pocket', 'p.position_x', 'p.position_y'], {}), '(pocket, p.position_x, p.position_y)\n', (9334, 9370), False, 'from qiskit_metal import draw, Dict\n'), ((10971, 10993), 'numpy.array', 'np.array', (['[qp1a, qp1b]'], {}), '([qp1a, qp1b])\n', (10979, 10993), True, 'import numpy as np\n'), ((11888, 11910), 'numpy.array', 'np.array', (['[qp2a, qp2b]'], {}), '([qp2a, qp2b])\n', (11896, 11910), True, 'import numpy as np\n'), ((12806, 12828), 'numpy.array', 'np.array', (['[qp3a, qp3b]'], {}), '([qp3a, qp3b])\n', (12814, 12828), True, 'import numpy as np\n'), ((10127, 10158), 'math.cos', 'cos', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10130, 10158), False, 'from math import sin, cos\n'), ((10168, 10199), 'math.sin', 'sin', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10171, 10199), False, 'from math import sin, cos\n'), ((10243, 10274), 'math.sin', 'sin', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10246, 10274), False, 'from math import sin, cos\n'), ((10284, 10315), 'math.cos', 'cos', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10287, 10315), False, 'from math import sin, cos\n')] |
# Copyright 2019-present NAVER Corp.
# Apache License v2.0
# <NAME>
import os, json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
class Seq2SQL_v1(nn.Module):
def __init__(self, input_size, hidden_size, num_layer, dropout,
number_cond_ops, number_agg_ops, old=False):
super(Seq2SQL_v1, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.max_where_number = 4
self.number_cond_ops = number_cond_ops
self.number_agg_ops = number_agg_ops
self.select_column_predict = SelectColumnPredict(input_size, hidden_size, num_layer, dropout)
self.select_agg_predict = SelectAggPredict(input_size, hidden_size, num_layer, dropout, number_agg_ops, old=old)
self.where_number_predict = WhereNumberPredict(input_size, hidden_size, num_layer, dropout)
self.wcp = WhereColumnPredict(input_size, hidden_size, num_layer, dropout)
self.wop = WhereOpPredict(input_size, hidden_size, num_layer, dropout, number_cond_ops)
self.wvp = WhereValuePredict_startend(input_size, hidden_size, num_layer, dropout, number_cond_ops, old=old) # start-end-search-discriminative model
# emb_question, [16,26,1536]
# len_question, [16]
# emb_header, [102,12,1536]
# len_header_token, [102]
# number_header, [16]
def forward(self, emb_question, len_question, emb_header, len_header_token, number_header,
g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
# sc
s_sc,s_sc_softmax = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=show_p_sc)
if g_sc:
pr_sc = g_sc
else:
pr_sc = pred_sc(s_sc)
# sa
s_sa,s_sa_softmax = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, number_header, pr_sc, show_p_sa=show_p_sa)
if g_sa:
# it's not necessary though.
pr_sa = g_sa
else:
pr_sa = pred_sa(s_sa)
# wn
s_wn,s_wn_softmax = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wn=show_p_wn)
if g_wn:
pr_wn = g_wn
else:
pr_wn = pred_wn(s_wn)
# wc
s_wc,s_wc_softmax = self.wcp(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wc=show_p_wc, penalty=True)
if g_wc:
pr_wc = g_wc
else:
pr_wc = pred_wherecolumn(pr_wn, s_wc)
# wo
s_wo,s_wo_softmax = self.wop(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, show_p_wo=show_p_wo)
if g_wo:
pr_wo = g_wo
else:
pr_wo = pred_wo(pr_wn, s_wo)
# wv
s_wv,s_wv_softmax = self.wvp(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, wo=pr_wo, show_p_wv=show_p_wv)
return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, s_sc_softmax, s_sa_softmax, s_wn_softmax, s_wc_softmax, s_wo_softmax, s_wv_softmax
def beam_forward(self, emb_question, len_question, emb_header, len_header_token, l_header, engine, tb,
nlu_t, nlu_wp_t, wp_to_wh_index, nlu,
beam_size=4,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
"""
Execution-guided beam decoding.
"""
# sc
s_sc,_ = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_sc=show_p_sc)
prob_sc = F.softmax(s_sc, dim=-1)
bS, mcL = s_sc.shape
# minimum_header_length = min(l_header)
# beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size
# sa
# Construct all possible sc_sa_score
prob_sc_sa = torch.zeros([bS, beam_size, self.number_agg_ops]).to(device)
prob_sca = torch.zeros_like(prob_sc_sa).to(device)
# get the top-k indices. pr_sc_beam = [B, beam_size]
pr_sc_beam = pred_sc_beam(s_sc, beam_size)
# calculate and predict s_sa.
for i_beam in range(beam_size):
pr_sc = list( array(pr_sc_beam)[:,i_beam] )
s_sa,_ = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=show_p_sa)
prob_sa = F.softmax(s_sa, dim=-1)
prob_sc_sa[:, i_beam, :] = prob_sa
prob_sc_selected = prob_sc[range(bS), pr_sc] # [B]
prob_sca[:,i_beam,:] = (prob_sa.t() * prob_sc_selected).t()
# [mcL, B] * [B] -> [mcL, B] (element-wise multiplication)
# [mcL, B] -> [B, mcL]
# Calculate the dimension of tensor
# tot_dim = len(prob_sca.shape)
# First flatten to 1-d
idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True)
# Now as sc_idx is already sorted, re-map them properly.
idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx]
idxs_arr = array(idxs)
# [B, beam_size, remainig dim]
# idxs[b][0] gives first probable [sc_idx, sa_idx] pairs.
# idxs[b][1] gives of second.
# Calculate prob_sca, a joint probability
beam_idx_sca = [0] * bS
beam_meet_the_final = [False] * bS
while True:
pr_sc = idxs_arr[range(bS),beam_idx_sca,0]
pr_sa = idxs_arr[range(bS),beam_idx_sca,1]
# map index properly
check = check_sc_sa_pairs(tb, pr_sc, pr_sa)
if sum(check) == bS:
break
else:
for b, check1 in enumerate(check):
if not check1: # wrong pair
beam_idx_sca[b] += 1
if beam_idx_sca[b] >= beam_size:
beam_meet_the_final[b] = True
beam_idx_sca[b] -= 1
else:
beam_meet_the_final[b] = True
if sum(beam_meet_the_final) == bS:
break
# Now pr_sc, pr_sa are properly predicted.
pr_sc_best = list(pr_sc)
pr_sa_best = list(pr_sa)
# Now, Where-clause beam search.
s_wn,_ = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=show_p_wn)
prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy()
        # Find the 4 (= max_num_of_conditions) most likely "executable" where-clauses.
# wc
s_wc,_ = self.wcp(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wc=show_p_wc, penalty=True)
prob_wc = F.sigmoid(s_wc).detach().to('cpu').numpy()
# pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc)
# get max_wn # of most probable columns & their prob.
pr_wn_max = [self.max_where_number] * bS
        pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc) # if some column does not have an executable where-clause, omit that column
prob_wc_max = zeros([bS, self.max_where_number])
for b, pr_wc_max1 in enumerate(pr_wc_max):
prob_wc_max[b,:] = prob_wc[b,pr_wc_max1]
        # get the most probable max_wn where-clauses
# wo
s_wo_max,_ = self.wop(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, show_p_wo=show_p_wo)
prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy()
# [B, max_wn, n_cond_op]
pr_wvi_beam_op_list = []
prob_wvi_beam_op_list = []
for i_op in range(self.number_cond_ops - 1):
pr_wo_temp = [[i_op] * self.max_where_number] * bS
# wv
s_wv,_ = self.wvp(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, wo=pr_wo_temp, show_p_wv=show_p_wv)
prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
# prob_wv
pr_wvi_beam, prob_wvi_beam = pred_wvi_se_beam(self.max_where_number, s_wv, beam_size)
pr_wvi_beam_op_list.append(pr_wvi_beam)
prob_wvi_beam_op_list.append(prob_wvi_beam)
        # pr_wvi_beam = [B, max_wn, k_logit**2 [st, ed] pairs]
# pred_wv_beam
# Calculate joint probability of where-clause
# prob_w = [batch, wc, wo, wv] = [B, max_wn, n_cond_op, n_pairs]
n_wv_beam_pairs = prob_wvi_beam.shape[2]
prob_w = zeros([bS, self.max_where_number, self.number_cond_ops - 1, n_wv_beam_pairs])
for b in range(bS):
for i_wn in range(self.max_where_number):
for i_op in range(self.number_cond_ops - 1): # do not use final one
for i_wv_beam in range(n_wv_beam_pairs):
# i_wc = pr_wc_max[b][i_wn] # already done
p_wc = prob_wc_max[b, i_wn]
p_wo = prob_wo_max[b, i_wn, i_op]
p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam]
prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv
# Perform execution guided decoding
conds_max = []
prob_conds_max = []
# while len(conds_max) < self.max_wn:
idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True)
# idxs = [B, i_wc_beam, i_op, i_wv_pairs]
# Construct conds1
for b, idxs1 in enumerate(idxs):
conds_max1 = []
prob_conds_max1 = []
for i_wn, idxs11 in enumerate(idxs1):
i_wc = pr_wc_max[b][idxs11[0]]
i_op = idxs11[1]
wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]]
# get wv_str
temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [wp_to_wh_index[b]], [nlu[b]])
merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b])
conds11 = [i_wc, i_op, merged_wv11]
prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2] ]
# test execution
# print(nlu[b])
# print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11])
pr_ans = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], [conds11])
if bool(pr_ans):
# pr_ans is not empty!
conds_max1.append(conds11)
prob_conds_max1.append(prob_conds11)
conds_max.append(conds_max1)
prob_conds_max.append(prob_conds_max1)
        # May need to do a more exhaustive search?
        # i.e., up to getting all executable cases.
# Calculate total probability to decide the number of where-clauses
pr_sql_i = []
prob_wn_w = []
pr_wn_based_on_prob = []
for b, prob_wn1 in enumerate(prob_wn):
max_executable_wn1 = len( conds_max[b] )
prob_wn_w1 = []
prob_wn_w1.append(prob_wn1[0]) # wn=0 case.
for i_wn in range(max_executable_wn1):
prob_wn_w11 = prob_wn1[i_wn+1] * prob_conds_max[b][i_wn]
prob_wn_w1.append(prob_wn_w11)
pr_wn_based_on_prob.append(argmax(prob_wn_w1))
prob_wn_w.append(prob_wn_w1)
pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]}
pr_sql_i.append(pr_sql_i1)
# s_wv = [B, max_wn, max_nlu_tokens, 2]
return prob_sca, prob_w, prob_wn_w, pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_sql_i
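# Rough end-to-end sketch (illustrative only): `emb_question` and friends are
# placeholder tensors shaped as in the comments above, and the op counts below are
# the usual WikiSQL values, not something asserted by this file.
#
#     model = Seq2SQL_v1(input_size=1536, hidden_size=100, num_layer=2, dropout=0.3,
#                        number_cond_ops=4, number_agg_ops=6)
#     s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, *softmaxes = model(
#         emb_question, len_question, emb_header, len_header_token, number_header)
#     pr_sc, pr_sa, pr_wn = pred_sc(s_sc), pred_sa(s_sa), pred_wn(s_wn)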
class SelectColumnPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
super(SelectColumnPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.sc_out = nn.Sequential(nn.Tanh(), nn.Linear(2 * hidden_size, 1))
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
# emb_question, [16,26,1536]
# len_question, [16]
# emb_header, [102,12,1536]
# len_header_token, [102]
# number_header, [16]
def forward(self, emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=False):
# Encode
encoded_question = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, number_header) # [b, header, dim]
bS = len(number_header)
mL_n = max(len_question)
# [bS, max_len_header, 100] * [bS, 100, mL_n] -> [bS, max_len_header, mL_n]
att_h = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2))
# Penalty on blank parts
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att_h[b, :, l_n1:] = -10000000000
p_n = self.softmax_dim2(att_h)
if show_p_sc:
# p = [b, header, n]
if p_n.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001, figsize=(12,3.5))
# subplot(6,2,7)
subplot2grid((7,2), (3, 0), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_h in range(number_header[0]):
color_idx = i_h % len(_color)
plot(p_n[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('sc: p_n for each h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# p_n [ bS, max_len_header, mL_n] -> [ bS, max_len_header, mL_n, 1]
# wenc_n [ bS, mL_n, 100] -> [ bS, 1, mL_n, 100]
# -> [bS, max_len_header, mL_n, 100] -> [bS, max_len_header, 100]
c_n = torch.mul(p_n.unsqueeze(3), encoded_question.unsqueeze(1)).sum(dim=2)
vec = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2)
score_select_column = self.sc_out(vec).squeeze(2) # [bS, max_len_header, 1] -> [bS, max_len_header]
score_select_column_softmax = self.softmax_dim_1(score_select_column)
# Penalty
max_len_header = max(number_header)
for b, l_header1 in enumerate(number_header):
if l_header1 < max_len_header:
score_select_column[b, l_header1:] = -10000000000
for b, l_header1 in enumerate(number_header):
if l_header1 < max_len_header:
score_select_column_softmax[b, l_header1:] = 0
return score_select_column,score_select_column_softmax
class SelectAggPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_agg_ops=-1, old=False):
super(SelectAggPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.sa_out = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, n_agg_ops)) # Fixed number of aggregation operator.
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
if old:
            # for backward compatibility
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=False):
# Encode
encoded_question = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
bS = len(l_header)
mL_n = max(len_question)
wenc_header_ob = encoded_header[list(range(bS)), pr_sc] # list, so one sample for each batch.
# [bS, question_len, 100] * [bS, 100, 1] -> [bS, question_len]
att = torch.bmm(self.W_att(encoded_question), wenc_header_ob.unsqueeze(2)).squeeze(2)
# Penalty on blank parts
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b, l_n1:] = -10000000000
# [bS, question_len]
p = self.softmax_dim1(att)
if show_p_sa:
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
subplot(7,2,3)
cla()
plot(p[0].data.numpy(), '--rs', ms=7)
title('sa: nlu_weight')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# [bS, question_len, 100] * ( [bS, question_len, 1] -> [bS, question_len, 100])
# -> [bS, question_len, 100] -> [bS, 100]
c_n = torch.mul(encoded_question, p.unsqueeze(2).expand_as(encoded_question)).sum(dim=1)
s_sa = self.sa_out(c_n)
s_sa_softmax = self.softmax_dim_1(s_sa)
return s_sa,s_sa_softmax
class WhereNumberPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, ):
super(WhereNumberPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.mL_w = 4 # max where condition number
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att_h = nn.Linear(hidden_size, 1)
self.W_hidden = nn.Linear(hidden_size, num_layer * hidden_size)
self.W_cell = nn.Linear(hidden_size, num_layer * hidden_size)
self.W_att_n = nn.Linear(hidden_size, 1)
self.wn_out = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, self.mL_w + 1)) # max number (4 + 1)
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=False):
# Encode
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, max_len_header, dim]
bS = len(l_header)
max_len_question = max(len_question)
max_len_header = max(l_header)
# mL_h = max(len_header_token)
# (self-attention?) column Embedding?
# [B, max_len_header, 100] -> [B, max_len_header, 1] -> [B, max_len_header]
att_h = self.W_att_h(encoded_header).squeeze(2)
# Penalty
for b, l_header1 in enumerate(l_header):
if l_header1 < max_len_header:
att_h[b, l_header1:] = -10000000000
p_h = self.softmax_dim1(att_h)
if show_p_wn:
if p_h.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
subplot(7,2,5)
cla()
plot(p_h[0].data.numpy(), '--rs', ms=7)
title('wn: header_weight')
grid(True)
fig.canvas.draw()
show()
            # input('Type Enter to continue.')
# [B, max_len_header, 100] * [ B, max_len_header, 1] -> [B, max_len_header, 100] -> [B, 100]
c_header = torch.mul(encoded_header, p_h.unsqueeze(2)).sum(1)
# [B, 100] --> [B, 2*100] Enlarge because there are two layers.
hidden = self.W_hidden(c_header) # [B, 4, 200/2]
hidden = hidden.view(bS, self.num_layer * 2, int(
self.hidden_size / 2)) # [4, B, 100/2] # number_of_layer_layer * (bi-direction) # lstm input convention.
hidden = hidden.transpose(0, 1).contiguous()
cell = self.W_cell(c_header) # [B, 4, 100/2]
cell = cell.view(bS, self.num_layer * 2, int(self.hidden_size / 2)) # [4, B, 100/2]
cell = cell.transpose(0, 1).contiguous()
wenc_n = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=(hidden, cell),
last_only=False) # [b, n, dim]
att_n = self.W_att_n(wenc_n).squeeze(2) # [B, max_len, 100] -> [B, max_len, 1] -> [B, max_len]
# Penalty
for b, l_n1 in enumerate(len_question):
if l_n1 < max_len_question:
att_n[b, l_n1:] = -10000000000
p_n = self.softmax_dim1(att_n)
if show_p_wn:
if p_n.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
subplot(7,2,6)
cla()
plot(p_n[0].data.numpy(), '--rs', ms=7)
title('wn: nlu_weight')
grid(True)
fig.canvas.draw()
show()
# input('Type Enter to continue.')
# [B, mL_n, 100] *([B, mL_n] -> [B, mL_n, 1] -> [B, mL_n, 100] ) -> [B, 100]
c_n = torch.mul(wenc_n, p_n.unsqueeze(2).expand_as(wenc_n)).sum(dim=1)
s_wn = self.wn_out(c_n)
s_wn_softmax = self.softmax_dim_1(s_wn)
return s_wn,s_wn_softmax
# where column predict
class WhereColumnPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
super(WhereColumnPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.W_out = nn.Sequential(
nn.Tanh(), nn.Linear(2 * hidden_size, 1)
)
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token,
l_header, show_p_wc, penalty=True):
# Encode
encoded_question = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
# attention
# wenc = [bS, mL, hidden_size]
# att = [bS, max_len_header, mL_n]
# att[b, i_h, j_n] = p(j_n| i_h)
att = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2))
# penalty to blank part.
mL_n = max(len_question)
for b_n, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b_n, :, l_n1:] = -10000000000
# make p(j_n | i_h)
p = self.softmax_dim2(att)
if show_p_wc:
# p = [b, header, n]
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
# subplot(6,2,7)
subplot2grid((7,2), (3, 1), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_h in range(l_header[0]):
color_idx = i_h % len(_color)
plot(p[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('wc: p_n for each h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# max nlu context vectors
# [bS, max_len_header, mL_n]*[bS, max_len_header, mL_n]
encoded_question = encoded_question.unsqueeze(1) # [ b, n, dim] -> [b, 1, n, dim]
p = p.unsqueeze(3) # [b, header, n] -> [b, header, n, 1]
c_n = torch.mul(encoded_question, p).sum(2) # -> [b, header, dim], c_n for each header.
y = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2) # [b, header, 2*dim]
score = self.W_out(y).squeeze(2) # [b, header]
score[torch.isnan(score)] = 0
score_softmax = self.softmax_dim_1(score)
if penalty:
for b, l_header1 in enumerate(l_header):
score[b, l_header1:] = -1e+10
for b, l_header1 in enumerate(l_header):
score_softmax[b, l_header1:] = 0
return score,score_softmax
# where op predict
class WhereOpPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=3):
super(WhereOpPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.mL_w = 4 # max where condition number
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.wo_out = nn.Sequential(
nn.Linear(2*hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, n_cond_ops)
)
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token,
l_header, wn, wc, wenc_n=None, show_p_wo=False):
# Encode
        if wenc_n is None:  # avoid an ambiguous truth-value check when a tensor is passed in
wenc_n = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
bS = len(l_header)
# wn
wenc_header_ob = [] # observed header
for b in range(bS):
# [[...], [...]]
# Pad list to maximum number of selections
real = [encoded_header[b, col] for col in wc[b]]
pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]] # this padding could be wrong. Test with zero padding later.
wenc_header_ob1 = torch.stack(real + pad) # It is not used in the loss function.
wenc_header_ob.append(wenc_header_ob1)
# list to [B, 4, dim] tensor.
wenc_header_ob = torch.stack(wenc_header_ob) # list to tensor.
wenc_header_ob = wenc_header_ob.to(device)
# [B, 1, mL_n, dim] * [B, 4, dim, 1]
# -> [B, 4, mL_n, 1] -> [B, 4, mL_n]
        # multiplication between NLq-tokens and selected column
att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),
wenc_header_ob.unsqueeze(3)
).squeeze(3)
# Penalty for blank part.
mL_n = max(len_question)
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b, :, l_n1:] = -10000000000
p = self.softmax_dim2(att) # p( n| selected_col )
if show_p_wo:
# p = [b, header, n]
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001)
# subplot(6,2,7)
subplot2grid((7,2), (5, 0), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_wn in range(self.mL_w):
color_idx = i_wn % len(_color)
plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('wo: p_n for selected h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# [B, 1, mL_n, dim] * [B, 4, mL_n, 1]
# --> [B, 4, mL_n, dim]
# --> [B, 4, dim]
c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)
# [bS, 5-1, dim] -> [bS, 5-1, 3]
vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob)], dim=2)
s_wo = self.wo_out(vec)
s_wo_softmax = self.softmax_dim_1(s_wo)
return s_wo,s_wo_softmax
class WhereValuePredict_startend(nn.Module):
"""
Discriminative model
Get start and end.
    Here, classifier for [ [pitcher], [team 1], [team 2], [year], ...]
Input: Encoded nlu & selected column.
Algorithm: Encoded nlu & selected column. -> classifier -> mask scores -> ...
"""
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=4, old=False):
super(WhereValuePredict_startend, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.mL_w = 4 # max where condition number
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.W_op = nn.Linear(n_cond_ops, hidden_size)
# self.W_n = nn.Linear(hidden_size, hidden_size)
if old:
self.wv_out = nn.Sequential(
nn.Linear(4 * hidden_size, 2)
)
else:
self.wv_out = nn.Sequential(
nn.Linear(4 * hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 2)
)
# self.wv_out = nn.Sequential(
# nn.Linear(3 * hidden_size, hidden_size),
# nn.Tanh(),
# nn.Linear(hidden_size, self.gdkL)
# )
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, wn, wc, wo, wenc_n=None, show_p_wv=False):
# Encode
        if wenc_n is None:  # avoid an ambiguous truth-value check when a tensor is passed in
wenc_n, hout, cout = encode(self.enc_n, emb_question, len_question,
return_hidden=True,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
bS = len(l_header)
wenc_header_ob = [] # observed header
for b in range(bS):
# [[...], [...]]
# Pad list to maximum number of selections
real = [encoded_header[b, col] for col in wc[b]]
pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]] # this padding could be wrong. Test with zero padding later.
wenc_header_ob1 = torch.stack(real + pad) # It is not used in the loss function.
wenc_header_ob.append(wenc_header_ob1)
# list to [B, 4, dim] tensor.
wenc_header_ob = torch.stack(wenc_header_ob) # list to tensor.
wenc_header_ob = wenc_header_ob.to(device)
# Column attention
# [B, 1, mL_n, dim] * [B, 4, dim, 1]
# -> [B, 4, mL_n, 1] -> [B, 4, mL_n]
        # multiplication between NLq-tokens and selected column
att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),
wenc_header_ob.unsqueeze(3)
).squeeze(3)
# Penalty for blank part.
mL_n = max(len_question)
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b, :, l_n1:] = -10000000000
p = self.softmax_dim2(att) # p( n| selected_col )
if show_p_wv:
# p = [b, header, n]
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001)
# subplot(6,2,7)
subplot2grid((7,2), (5, 1), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_wn in range(self.mL_w):
color_idx = i_wn % len(_color)
plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('wv: p_n for selected h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# [B, 1, mL_n, dim] * [B, 4, mL_n, 1]
# --> [B, 4, mL_n, dim]
# --> [B, 4, dim]
c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)
# Select observed headers only.
# Also generate one_hot vector encoding info of the operator
# [B, 4, dim]
wenc_op = []
for b in range(bS):
# [[...], [...]]
# Pad list to maximum number of selections
wenc_op1 = torch.zeros(self.mL_w, self.n_cond_ops)
wo1 = wo[b]
idx_scatter = []
l_wo1 = len(wo1)
for i_wo11 in range(self.mL_w):
if i_wo11 < l_wo1:
wo11 = wo1[i_wo11]
idx_scatter.append([int(wo11)])
else:
idx_scatter.append([0]) # not used anyway
wenc_op1 = wenc_op1.scatter(1, torch.tensor(idx_scatter), 1)
wenc_op.append(wenc_op1)
# list to [B, 4, dim] tensor.
wenc_op = torch.stack(wenc_op) # list to tensor.
wenc_op = wenc_op.to(device)
# Now after concat, calculate logits for each token
# [bS, 5-1, 3*hidden_size] = [bS, 4, 300]
vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob), self.W_op(wenc_op)], dim=2)
# Make extended vector based on encoded nl token containing column and operator information.
# wenc_n = [bS, mL, 100]
# vec2 = [bS, 4, mL, 400]
vec1e = vec.unsqueeze(2).expand(-1,-1, mL_n, -1) # [bS, 4, 1, 300] -> [bS, 4, mL, 300]
wenc_ne = wenc_n.unsqueeze(1).expand(-1, 4, -1, -1) # [bS, 1, mL, 100] -> [bS, 4, mL, 100]
vec2 = torch.cat( [vec1e, wenc_ne], dim=3)
# now make logits
s_wv = self.wv_out(vec2) # [bS, 4, mL, 400] -> [bS, 4, mL, 2]
s_wv_softmax = self.softmax_dim_1(s_wv)
# penalty for spurious tokens
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
s_wv[b, :, l_n1:, :] = -10000000000
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
s_wv_softmax[b, :, l_n1:, :] = 0
return s_wv,s_wv_softmax
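# Worked example of the operator one-hot built in forward() above (illustrative
# values): with self.mL_w = 4, n_cond_ops = 4 and wo[b] = [2, 0], the scatter yields
#   wenc_op1 = [[0, 0, 1, 0],
#               [1, 0, 0, 0],
#               [1, 0, 0, 0],
#               [1, 0, 0, 0]]
# where the last two rows come from the "not used anyway" zero padding.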
def Loss_selectwhere_startend_v2(score_select_column, s_sa, s_wn, s_wc, s_wo,
s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi):
"""
:param s_wv: score [ B, n_conds, T, score]
:param g_wn: [ B ]
:param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
:return:
"""
loss = 0
# loss += Loss_sc(score_select_column, ground_truth_select_column)
# loss += Loss_sa(s_sa, g_sa)
# loss += Loss_wn(s_wn, g_wn)
# loss += Loss_wc(s_wc, g_wc)
# loss += Loss_wo(s_wo, g_wn, g_wo)
# loss += Loss_wv_se(s_wv, g_wn, g_wvi)
return loss
def Loss_sw_se(score_select_column, s_sa, s_wn, s_wc, s_wo,
s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi):
"""
:param s_wv: score [ B, n_conds, T, score]
:param g_wn: [ B ]
:param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
:return:
"""
loss = 0
loss += Loss_sc(score_select_column, ground_truth_select_column)
loss += Loss_sa(s_sa, g_sa)
loss += Loss_wn(s_wn, g_wn)
loss += Loss_wc(s_wc, g_wc)
loss += Loss_wo(s_wo, g_wn, g_wo)
loss += Loss_wv_se(s_wv, g_wn, g_wvi)
return loss
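# Training-step sketch (placeholders only; the gold labels g_* follow the shapes
# documented in the docstring above):
#
#     loss = Loss_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv,
#                       g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi)
#     loss.backward()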
def Loss_sc(s_sc, g_sc):
loss = F.cross_entropy(s_sc, torch.tensor(g_sc).to(device))
return loss
def Loss_sa(s_sa, g_sa):
loss = F.cross_entropy(s_sa, torch.tensor(g_sa).to(device))
return loss
def Loss_wn(s_wn, g_wn):
loss = F.cross_entropy(s_wn, torch.tensor(g_wn).to(device))
return loss
def Loss_wc(s_wc, g_wc):
# Construct index matrix
bS, max_h_len = s_wc.shape
im = torch.zeros([bS, max_h_len]).to(device)
for b, g_wc1 in enumerate(g_wc):
for g_wc11 in g_wc1:
im[b, g_wc11] = 1.0
# Construct prob.
p = F.sigmoid(s_wc)
loss = F.binary_cross_entropy(p, im)
return loss
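# Worked example for Loss_wc (illustrative values): with bS = 1, max_h_len = 4 and
# g_wc = [[0, 2]], the multi-hot target is
#   im = [[1., 0., 1., 0.]]
# and the loss is the binary cross-entropy between sigmoid(s_wc) and im.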
def Loss_wo(s_wo, g_wn, g_wo):
# Construct index matrix
loss = 0
for b, g_wn1 in enumerate(g_wn):
if g_wn1 == 0:
continue
g_wo1 = g_wo[b]
s_wo1 = s_wo[b]
loss += F.cross_entropy(s_wo1[:g_wn1], torch.tensor(g_wo1).to(device))
return loss
def Loss_wv_se(s_wv, g_wn, g_wvi):
"""
    s_wv: [bS, 4, mL, 2], 4 stands for the maximum # of conditions, 2 stands for start & end logits.
g_wvi: [ [1, 3, 2], [4,3] ] (when B=2, wn(b=1) = 3, wn(b=2) = 2).
"""
loss = 0
# g_wvi = torch.tensor(g_wvi).to(device)
for b, g_wvi1 in enumerate(g_wvi):
# for i_wn, g_wvi11 in enumerate(g_wvi1):
        g_wn1 = len(g_wvi1) # modified: take the condition count from g_wvi1 itself
        # g_wn1 = g_wn[b] # modified: the original read it from g_wn
if g_wn1 == 0:
continue
        g_wvi1 = torch.tensor(g_wvi1)[:g_wn1].to(device) # modified
g_st1 = g_wvi1[:,0]
g_ed1 = g_wvi1[:,1]
# loss from the start position
loss += F.cross_entropy(s_wv[b,:g_wn1,:,0], g_st1)
# print("st_login: ", s_wv[b,:g_wn1,:,0], g_st1, loss)
# loss from the end position
loss += F.cross_entropy(s_wv[b,:g_wn1,:,1], g_ed1)
# print("ed_login: ", s_wv[b,:g_wn1,:,1], g_ed1, loss)
return loss
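# Shape reminder for g_wvi as consumed above (illustrative): for a batch of two
# questions with 2 and 1 where-conditions respectively,
#   g_wvi = [[[3, 5], [8, 8]], [[1, 2]]]
# i.e. each inner [start, end] pair indexes question tokens; the start logits
# s_wv[b, :wn, :, 0] and end logits s_wv[b, :wn, :, 1] are each scored with
# cross-entropy against those indices.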
# ========= Decoder-Layer ===========
class FT_s2s_1(nn.Module):
""" Decoder-Layer """
def __init__(self, input_size, hidden_size, num_layer, dropout, max_seq_length, n_cond_ops, n_agg_ops, old=False):
super(FT_s2s_1, self).__init__()
self.input_size = input_size # input_size
self.hidden_size = hidden_size # hidden_size
self.ls = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.n_agg_ops = n_agg_ops
self.n_where_num = 4
self.decoder_s2s = Decoder_s2s(input_size, hidden_size, num_layer, dropout, max_seq_length)
def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None):
score = self.decoder_s2s(wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs)
return score
def EG_forward(self, wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size=4, beam_only=True):
""" EG-guided beam-search """
score = self.decoder_s2s.EG_forward(wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size, beam_only)
return score
class Decoder_s2s(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, max_seq_length=222, n_cond_ops=3):
super(Decoder_s2s, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.mL = max_seq_length
self.Tmax = 200
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.decode_pn = nn.LSTM(input_size=max_seq_length, hidden_size=hidden_size,
num_layers=num_layer, batch_first=True,
dropout=dropout)
self.W_s2s = nn.Linear(input_size, hidden_size)
self.W_pnt = nn.Linear(hidden_size, hidden_size)
self.wv_out = nn.Sequential(nn.Tanh(), nn.Linear(hidden_size, 1))
def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None,):
# Encode
bS, mL_input, input_size = wenc_s2s.shape
# Now, pointer network.
ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200]
        ipnt[:, 0, pnt_start_tok] = 1 # 27 is the index of the start token under the current tokenization scheme
# initial (current) pointer
cpnt = ipnt
# reshape wenc_s2s to incorporate T later
wenc_s2s = wenc_s2s.unsqueeze(1)
# h_0 and c_0 from cls_vec
# They are not bidirectional.
h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
for i_layer in range(self.num_layer):
h_st = (2*i_layer)*self.hidden_size
h_ed = h_st + self.hidden_size
c_st = (2*i_layer+1)*self.hidden_size
c_ed = c_st + self.hidden_size
h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim]
c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim]
if g_pnt_idxs:
pnt_n = torch.zeros(bS, self.Tmax, mL_input).to(device) # one hot
# assign index
for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):
for t, g_pnt_idx in enumerate(g_pnt_idxs1):
pnt_n[b, t, g_pnt_idx] = 1
# Encode
dec_pn, _ = self.decode_pn(pnt_n, (h_0, c_0))
dec_pn = dec_pn.contiguous()
# [bS, T, input_size]
dec_pn = dec_pn.unsqueeze(2)
# Calculate score
s_wv = self.wv_out(
self.W_s2s(wenc_s2s)
+ self.W_pnt(dec_pn)
).squeeze(3) # [B, T, mL_input, dim] -> [B, T, mL_input, 1] -> [B, T, mL_input]
# s_wv = [B, 4, T, mL_n] = [batch, conds, token idx, score]
# penalty
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv[b, :, l_input1:] = -10000000000
else:
t = 0
s_wv_list = []
cpnt_h = (h_0, c_0)
while t < self.Tmax:
dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm
# [B, 1, 100] -> [B, 1, 1, 100]
dec_pn = dec_pn.unsqueeze(2)
# [bS, T, input_size]
# get score
s_wv1 = self.wv_out(
self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim]
+ self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1
).squeeze(3)
# s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score]
# -> [B, 4, mL_n]
# Masking --
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv1[b, :, l_input1:] = -10000000000
# Collect score--
s_wv_list.append(s_wv1)
# [B, 1, mL_input] -> [B, mL_n] -> [bS*(5-1)]
# (max_val, max_indices)
_val, pnt_n = s_wv1.view(bS, -1).max(dim=1)
# formatting pnt_n as a one-hot input.
cpnt = torch.zeros(bS, mL_input).to(device)
# cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device)
cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1)
cpnt = cpnt.unsqueeze(1) # --> [B * 4, 1, 200]
t += 1
            s_wv = torch.stack(s_wv_list, 1) # [B, T, 1, mL_input]
            s_wv = s_wv.squeeze(2) # [B, T, mL_input]
# # Following lines seems to be unnecessary.
# # Penalty to blank parts
# for b, l_input1 in enumerate(l_input):
# if l_input1 < mL_input:
# s_wv[b, :, l_input1:] = -10000000000
return s_wv
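    # Note on the two decoding modes above (descriptive only): when g_pnt_idxs is
    # given, the gold pointer sequence is one-hot encoded and run through decode_pn
    # in a single teacher-forced pass; otherwise the decoder runs greedily for at
    # most self.Tmax steps, feeding the argmax pointer back in as a one-hot input.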
def EG_forward(self, wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size, beam_only=True):
# Encode
bS, mL_input, input_size = wenc_s2s.shape
        # reshape wenc_s2s to incorporate T later
wenc_s2s = wenc_s2s.unsqueeze(1)
# h_0 and c_0 from cls_vec
# They are not bidirectional.
h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
for i_layer in range(self.num_layer):
h_st = (2*i_layer)*self.hidden_size
h_ed = h_st + self.hidden_size
c_st = (2*i_layer+1)*self.hidden_size
c_ed = c_st + self.hidden_size
h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim]
c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim]
# initial (current) pointer
pnt_list_beam = []
cpnt_beam = []
cpnt_h_beam = []
for i_beam in range(beam_size):
pnt_list_beam1 = []
for b in range(bS):
pnt_list_beam1.append( [ [pnt_start_tok], 0] )
pnt_list_beam.append(pnt_list_beam1)
        # initial cpnt
# Now, initialize pointer network.
ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200]
        # Distort ipnt by i_beam on purpose to avoid initial duplication of beam-search
ipnt[:, 0, pnt_start_tok] = 1 # 27 is of start token under current tokenization scheme
cpnt_beam.append(ipnt)
cpnt_h_beam.append( (h_0, c_0) )
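        # Per-beam bookkeeping: pnt_list_beam holds, for every batch element, a
        # (pointer sequence, accumulated log-probability) pair; cpnt_beam holds the one-hot
        # input for the next decoding step; cpnt_h_beam holds that beam's LSTM hidden state.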
t = 0
while t < self.Tmax:
# s_wv1_beam = []
candidates = [ [] for b in range(bS) ] # [bS]
# Generate beam
for i_beam, cpnt in enumerate(cpnt_beam):
cpnt_h = cpnt_h_beam[i_beam]
pnt_list_beam1 = pnt_list_beam[i_beam]
dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm
cpnt_h_beam[i_beam] = cpnt_h
# [B, 1, 100] -> [B, 1, 1, 100]
dec_pn = dec_pn.unsqueeze(2)
# [bS, T, input_size]
# get score
s_wv1 = self.wv_out(
self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim]
+ self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1
).squeeze(3)
# s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score]
# -> [B, 4, mL_n]
# Masking --
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv1[b, :, l_input1:] = -10000000000
# Get the candidates only among the input space.
prob, idxs = F.softmax(s_wv1.view(bS, -1), dim=1).topk(dim=1, k=max(l_input))
log_prob = torch.log(prob) # [bS, beam_size]
for b, log_prob1 in enumerate(log_prob):
pnt_list11, score = pnt_list_beam1[b]
for i_can, log_prob11 in enumerate(log_prob1):
# no update if last token was the end-token
previous_pnt = pnt_list11[-1]
if previous_pnt== pnt_end_tok:
new_seq = pnt_list11
new_score = score
else:
new_seq = pnt_list11 + [idxs[b][i_can].item()]
new_score = score + log_prob11.item()
_candidate = [new_seq, new_score]
candidates[b].append(_candidate)
# Execution-guided beam filtering
for b, candidates1 in enumerate(candidates):
new_pnt_list_batch1 = sorted(candidates1, key=lambda list1: list1[-1], reverse=True)
count = 0
selected_candidates1 = []
for new_pnt_list_batch11 in new_pnt_list_batch1:
if new_pnt_list_batch11 not in selected_candidates1:
if beam_only:
selected_candidates1.append(new_pnt_list_batch11)
pnt_list_beam[count][b] = new_pnt_list_batch11
count +=1
else:
# Need to be modified here.
executable = False
testable = False
pr_i_vg_list, pr_i_vg_sub_list = gen_i_vg_from_pnt_idxs([new_pnt_list_batch11[0]], [i_sql_vocab[b]], [i_nlu[b]],
[i_hds[b]])
pr_sql_q_s2s, pr_sql_i = gen_sql_q_from_i_vg([tokens[b]], [nlu[b]], [nlu_t[b]], [hds[b]], [tt_to_t_idx[b]],
pnt_start_tok, pnt_end_tok,
[new_pnt_list_batch11[0]], pr_i_vg_list, pr_i_vg_sub_list)
# check testability from select-clause
try:
                                # check whether basic elements are present in pr_sql_i
# If so, it is testable.
idx_agg = pr_sql_i[0]["agg"]
idx_sel = pr_sql_i[0]["sel"]
testable = True
except:
testable = False
pass
# check the presence of conds
if testable:
try:
conds = pr_sql_i[0]["conds"]
except:
conds = []
try:
pr_ans1 = engine.execute(tb[b]['id'], idx_sel, idx_agg, conds)
executable = bool(pr_ans1)
except:
executable = False
#
if testable:
if executable:
add_candidate = True
else:
add_candidate = False
else:
add_candidate = True
if add_candidate:
selected_candidates1.append(new_pnt_list_batch11)
pnt_list_beam[count][b] = new_pnt_list_batch11
count += 1
if count == beam_size:
break
if count < beam_size:
# not executable at all..
# add junk sequence.
for i_junk in range(count, beam_size):
pnt_list_beam[i_junk][b] = [[pnt_end_tok],-9999999]
# generate cpnt
# formatting pnt_n as a one-hot input.
for i_beam in range(beam_size):
cpnt = torch.zeros(bS, mL_input).to(device)
# cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device)
idx_batch = [seq_score[0][-1] for seq_score in pnt_list_beam[i_beam]]
pnt_n = torch.tensor(idx_batch).to(device)
cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1)
cpnt = cpnt.unsqueeze(1) # --> [B, t=1, mL_input]
cpnt_beam[i_beam] = cpnt
t += 1
# Generate best pr_pnt_list, p_tot
pr_pnt_idxs = []
p_list = []
for b in range(bS):
pnt_list_beam_best = pnt_list_beam[0]
pr_pnt_idxs.append(pnt_list_beam_best[b][0])
p_list.append( pnt_list_beam_best[b][1])
return pr_pnt_idxs, p_list, pnt_list_beam
# ============= Shallow-Layer ===============
class FT_Scalar_1(nn.Module):
""" Shallow-Layer """
def __init__(self, input_size, hidden_size, num_layer, dropout, n_cond_ops, n_agg_ops, old=False):
super(FT_Scalar_1, self).__init__()
self.input_size = input_size # input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.n_agg_ops = n_agg_ops
self.n_where_num = 4
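    # Note: the "Shallow-Layer" model adds no trainable heads on top of the encoder; every
    # clause score below is read directly from a fixed slice of the header embeddings
    # (wemb_h), the [CLS] vector (cls_vec), or the question embeddings (emb_question).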
def scp(self, wemb_h, l_header):
bS, max_header_len, _ = wemb_h.shape
# s_sc
s_sc = torch.zeros(bS, max_header_len).to(device)
s_sc[:, :] = wemb_h[:, :, 0] # s_sc = [B, max_header length, 1]
# s_sc[:,:] = F.tanh(wemb_h[:,:,0]) # s_sc = [B, max_header length, 1]
# s_sc = s_sc.squeeze(2)
# masking
# print(f"s_sc {s_sc}")
for b, l_header1 in enumerate(l_header):
s_sc[b, l_header1:] = -9999999999.0
return s_sc
def sap(self, wemb_h, pr_sc, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
# select of aggregation operator
s_sa = torch.zeros([bS, self.n_agg_ops]).to(device)
for b, pr_sc1 in enumerate(pr_sc):
s_sa[b,:] = wemb_h[b,pr_sc1,idx_st:idx_ed]
return s_sa
def wnp(self, cls_vec):
bS = cls_vec.shape[0]
# [B,hidden_size] -> [B, n_where_num+1]
s_wn = torch.zeros(bS, (self.n_where_num + 1)).to(device)
s_wn[:, :] = cls_vec[:, 0:(self.n_where_num + 1)]
return s_wn
def wcp(self, wemb_h, l_header, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
s_wc = torch.zeros(bS, max_header_len, 1).to(device)
s_wc[:, :, :] = wemb_h[:, :, idx_st:idx_ed]
s_wc = s_wc.squeeze(2) # [B, max_header_length]
# masking
for b, l_header1 in enumerate(l_header):
s_wc[b, l_header1:] = -99999999999.0
return s_wc
def wop(self, wemb_h, pr_wc, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
s_wo = torch.zeros([bS, self.n_where_num, self.n_cond_ops]).to(device)
for b, pr_wc1 in enumerate(pr_wc):
if len(pr_wc1) > 0:
s_wo[b, 0:len(pr_wc1), :] = wemb_h[b, pr_wc1, idx_st:idx_ed]
else:
pass
return s_wo
def wvp(self, emb_question, len_question, pr_wc):
bS, _, _ = emb_question.shape
s_wv = torch.zeros([bS, self.n_where_num, max(len_question), 2]).to(device)
for b, pr_wc1 in enumerate(pr_wc):
if len(pr_wc1) > 0:
# start logit
s_wv[b, 0:len(pr_wc1), :, 0] = emb_question[b, :, pr_wc1].transpose(0, 1)
# end logit
s_wv[b, 0:len(pr_wc1), :, 1] = emb_question[b, :, [pr_wc11 + 100 for pr_wc11 in pr_wc1]].transpose(0, 1)
else:
pass
# masking
# penalty for spurious tokens
for b, l_n1 in enumerate(len_question):
if l_n1 < max(len_question):
s_wv[b, :, l_n1:, :] = -1e+11
return s_wv
def forward(self, emb_question, len_question, wemb_h, l_header, cls_vec,
g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
# emb_question = [B, max_nlu_token_length, hidden_size] # here, # of target_layer is fixed to 1.
# wemb_h = [B, max_header #, hidden_size]
s_sc = self.scp(wemb_h, l_header)
if g_sc:
pr_sc = g_sc
else:
pr_sc = pred_sc(s_sc)
# s_sa
idx_st = 1
idx_ed = 1 + self.n_agg_ops
s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed)
if g_sa:
pr_sa = g_sa
else:
pr_sa = pred_sa(s_sa)
# where_number
s_wn = self.wnp(cls_vec)
if g_wn:
pr_wn = g_wn
else:
pr_wn = pred_wn(s_wn)
# wc
idx_st = idx_ed+1
idx_ed = idx_st+1
s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed)
if g_wc:
pr_wc = g_wc
else:
pr_wc = pred_wherecolumn(pr_wn, s_wc)
# wo
idx_st = idx_ed+1
idx_ed = idx_st + self.n_cond_ops
s_wo = self.wop(wemb_h, pr_wc, idx_st, idx_ed)
if g_wo:
pr_wo = g_wo
else:
pr_wo = pred_wo(pr_wn, s_wo)
# wv
# s_wv = [bS, 4, mL, 2]
s_wv = self.wvp(emb_question, len_question, pr_wc)
# print(s_wv)
# s_wv = F.tanh(s_wv)
return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv
def forward_EG(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_tt, tt_to_t_idx, nlu,
beam_size=4):
"""
Execution-guided beam decoding.
Essentially identical with that of NL2SQL Layer.
"""
# Select-clause
prob_sca, pr_sc_best, pr_sa_best, \
p_sc_best, p_sa_best, p_select \
= self.EG_decoding_select(wemb_h, l_header, tb, beam_size=beam_size)
# Where-clause
prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \
p_where, p_wn_best, p_wc_best, p_wo_best, p_wvi_best \
= self.EG_decoding_where(emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_tt, tt_to_t_idx, nlu,
pr_sc_best, pr_sa_best,
beam_size=4)
p_tot = cal_prob_tot(p_select, p_where)
return pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_wvi_best, \
pr_sql_i, p_tot, p_select, p_where, p_sc_best, p_sa_best, \
p_wn_best, p_wc_best, p_wo_best, p_wvi_best
def EG_decoding_select(self, wemb_h, l_header, tb,
beam_size=4, show_p_sc=False, show_p_sa=False):
# sc
s_sc = self.scp(wemb_h, l_header)
prob_sc = F.softmax(s_sc, dim=-1)
bS, mcL = s_sc.shape
# minimum_header_length = min(l_header)
# beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size
# sa
# Construct all possible sc_sa_score
prob_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device)
score_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device)
prob_sca = torch.zeros_like(prob_sc_sa).to(device)
# get the top-k indices. pr_sc_beam = [B, beam_size]
pr_sc_beam = pred_sc_beam(s_sc, beam_size)
# calculate and predict s_sa.
idx_st = 1
idx_ed = 1 + self.n_agg_ops
for i_beam in range(beam_size):
pr_sc = list(array(pr_sc_beam)[:, i_beam])
s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed)
prob_sa = F.softmax(s_sa, dim=-1)
prob_sc_sa[:, i_beam, :] = prob_sa
score_sc_sa[:, i_beam, :] = s_sa
prob_sc_selected = prob_sc[range(bS), pr_sc] # [B]
prob_sca[:, i_beam, :] = (prob_sa.t() * prob_sc_selected).t()
# [mcL, B] * [B] -> [mcL, B] (element-wise multiplication)
# [mcL, B] -> [B, mcL]
# Calculate the dimension of tensor
# tot_dim = len(prob_sca.shape)
idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True)
# Now as sc_idx is already sorted, re-map them properly.
idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx]
idxs_arr = array(idxs)
        # [B, beam_size, remaining dim]
        # idxs[b][0] gives the most probable [sc_idx, sa_idx] pair,
        # idxs[b][1] the second most probable, and so on.
# Calculate prob_sca, a joint probability
beam_idx_sca = [0] * bS
beam_meet_the_final = [False] * bS
while True:
pr_sc = idxs_arr[range(bS), beam_idx_sca, 0]
pr_sa = idxs_arr[range(bS), beam_idx_sca, 1]
# map index properly
check = check_sc_sa_pairs(tb, pr_sc, pr_sa)
if sum(check) == bS:
break
else:
for b, check1 in enumerate(check):
if not check1: # wrong pair
beam_idx_sca[b] += 1
if beam_idx_sca[b] >= beam_size:
beam_meet_the_final[b] = True
beam_idx_sca[b] -= 1
else:
beam_meet_the_final[b] = True
if sum(beam_meet_the_final) == bS:
break
# Now pr_sc, pr_sa are properly predicted.
pr_sc_best = list(pr_sc)
pr_sa_best = list(pr_sa)
# output for later analysis.
p_sc_best = cal_prob_sc(s_sc, pr_sc_best)
p_sa_best = cal_prob_sa(score_sc_sa[range(bS), beam_idx_sca, :].squeeze(1), pr_sa_best)
p_select = cal_prob_select(p_sc_best, p_sa_best)
# p_select = prob_sca[range(bS),beam_idx_sca,pr_sa_best].detach().to('cpu').numpy()
return prob_sca, pr_sc_best, pr_sa_best, p_sc_best, p_sa_best, p_select
def EG_decoding_where(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_wp_t, tt_to_t_idx, nlu,
pr_sc_best, pr_sa_best,
beam_size=4, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
bS, max_header_len, _ = wemb_h.shape
# Now, Where-clause beam search.
idx_st = 1
idx_ed = 1 + self.n_agg_ops
s_wn = self.wnp(cls_vec)
prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy()
        # Find the most likely "executable" where-clauses, up to 4 (= max number of conditions).
# wc
idx_st = idx_ed + 1
idx_ed = idx_st + 1
s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed)
prob_wc = torch.sigmoid(s_wc).detach().to('cpu').numpy()
# pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc)
# get max_wn # of most probable columns & their prob.
pr_wn_max = [self.n_where_num] * bS
        pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc)  # if some columns do not have an executable where-clause, omit them
prob_wc_max = zeros([bS, self.n_where_num])
for b, pr_wc_max1 in enumerate(pr_wc_max):
prob_wc_max[b, :] = prob_wc[b, pr_wc_max1]
        # get the most probable n_where_num where-clauses
# wo
idx_st = idx_ed + 1
idx_ed = idx_st + self.n_cond_ops
s_wo_max = self.wop(wemb_h, pr_wc_max, idx_st, idx_ed)
prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy()
# [B, n_where_num, n_cond_op]
pr_wvi_beam_op_list = []
prob_wvi_beam_op_list = []
prob_wvi_beam_st_op_list = []
prob_wvi_beam_ed_op_list = []
        # To re-use code, the calculation below is repeated for every operator even though it does not depend on i_op.
for i_op in range(self.n_cond_ops - 1):
pr_wo_temp = [[i_op] * self.n_where_num] * bS
# wv
s_wv = self.wvp(emb_question, len_question, pr_wc_max)
prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
# prob_wv
pr_wvi_beam, prob_wvi_beam, prob_wvi_beam_st, prob_wvi_beam_ed = pred_wvi_se_beam(self.n_where_num, s_wv, beam_size)
pr_wvi_beam_op_list.append(pr_wvi_beam)
prob_wvi_beam_op_list.append(prob_wvi_beam)
prob_wvi_beam_st_op_list.append(prob_wvi_beam_st)
prob_wvi_beam_ed_op_list.append(prob_wvi_beam_ed)
            # pr_wvi_beam = [B, n_where_num, k_logit**2 [st, ed] pairs]
# pred_wv_beam
# Calculate joint probability of where-clause
# prob_w = [batch, wc, wo, wv] = [B, n_where_num, n_cond_op, n_pairs]
n_wv_beam_pairs = prob_wvi_beam.shape[2]
prob_w = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wc_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wo_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wvi_st_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wvi_ed_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
for b in range(bS):
for i_wn in range(self.n_where_num):
for i_op in range(self.n_cond_ops - 1): # do not use final one
p_wc = prob_wc_max[b, i_wn]
for i_wv_beam in range(n_wv_beam_pairs):
# i_wc = pr_wc_max[b][i_wn] # already done
p_wo = prob_wo_max[b, i_wn, i_op]
p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam]
prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv
prob_wc_dupl[b, i_wn, i_op, i_wv_beam] = p_wc
prob_wo_dupl[b, i_wn, i_op, i_wv_beam] = p_wo
p_wv_st = prob_wvi_beam_st_op_list[i_op][b, i_wn, i_wv_beam]
p_wv_ed = prob_wvi_beam_ed_op_list[i_op][b, i_wn, i_wv_beam]
prob_wvi_st_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_st
prob_wvi_ed_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_ed
# Perform execution guided decoding
conds_max = []
prob_conds_max = []
# while len(conds_max) < self.n_where_num:
idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True)
# idxs = [B, i_wc_beam, i_op, i_wv_pairs]
        # Construct conds1. Collect only the executable ones, in descending order of probability.
pr_wvi_max = []
p_wc_max = []
p_wo_max = []
p_wvi_max = []
for b, idxs1 in enumerate(idxs):
conds_max1 = []
prob_conds_max1 = []
pr_wvi1_max = []
p_wc1_max = []
p_wo1_max = []
p_wvi1_max = []
for i_wn, idxs11 in enumerate(idxs1):
i_wc = pr_wc_max[b][idxs11[0]]
i_op = idxs11[1]
wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]]
# idx11[0]
# get wv_str
temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [tt_to_t_idx[b]],
[nlu[b]])
merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b])
conds11 = [i_wc, i_op, merged_wv11]
prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2]]
p_wc11_max = prob_wc_dupl[b, idxs11[0], idxs11[1], idxs11[2]]
p_wo11_max = prob_wo_dupl[b, idxs11[0], idxs11[1], idxs11[2]]
p_wvi11_max = [ prob_wvi_st_dupl[b, idxs11[0], idxs11[1], idxs11[2]],
prob_wvi_ed_dupl[b, idxs11[0], idxs11[1], idxs11[2]] ]
# test execution
# print(nlu[b])
# print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11])
pr_ans = engine.execute(tb[b]['id'], pr_sc_best[b], pr_sa_best[b], [conds11])
if bool(pr_ans):
# pr_ans is not empty!
conds_max1.append(conds11)
prob_conds_max1.append(prob_conds11)
pr_wvi1_max.append(wvi)
p_wc1_max.append(p_wc11_max)
p_wo1_max.append(p_wo11_max)
p_wvi1_max.append(p_wvi11_max)
conds_max.append(conds_max1)
prob_conds_max.append(prob_conds_max1)
pr_wvi_max.append(pr_wvi1_max)
p_wc_max.append(p_wc1_max)
p_wo_max.append(p_wo1_max)
p_wvi_max.append(p_wvi1_max)
        # May need to do a more exhaustive search,
        # i.e. keep going until all executable cases are collected.
# Calculate total probability to decide the number of where-clauses
pr_sql_i = []
prob_wn_w = [] # total where-clause probability
pr_wn_based_on_prob = []
pr_wvi_best = []
p_wc = []
p_wo = []
p_wvi = []
for b, prob_wn1 in enumerate(prob_wn):
max_executable_wn1 = len(conds_max[b])
prob_wn_w1 = []
prob_wn_w1.append(prob_wn1[0]) # wn=0 case.
for i_wn in range(max_executable_wn1):
prob_wn_w11 = prob_wn1[i_wn + 1] * prob_conds_max[b][i_wn]
prob_wn_w1.append(prob_wn_w11)
pr_wn_based_on_prob.append(argmax(prob_wn_w1))
prob_wn_w.append(prob_wn_w1)
pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]}
pr_wvi_best1 = pr_wvi_max[b][:pr_wn_based_on_prob[b]]
pr_sql_i.append(pr_sql_i1)
pr_wvi_best.append(pr_wvi_best1)
p_wc.append( p_wc_max[b][:pr_wn_based_on_prob[b]] )
p_wo.append( p_wo_max[b][:pr_wn_based_on_prob[b]] )
p_wvi.append( p_wvi_max[b][:pr_wn_based_on_prob[b]] )
# s_wv = [B, n_where_num, max_nlu_tokens, 2]
p_wn = cal_prob_wn(s_wn, pr_wn_based_on_prob)
p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi)
return prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \
p_where, p_wn, p_wc, p_wo, p_wvi
def Loss_s2s(score, g_pnt_idxs):
"""
score = [B, T, max_seq_length]
"""
# WHERE string part
loss = 0
for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):
ed = len(g_pnt_idxs1) - 1
score_part = score[b, :ed]
loss += F.cross_entropy(score_part, torch.tensor(g_pnt_idxs1[1:]).to(device)) # +1 shift.
return loss
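# Illustrative usage sketch for Loss_s2s (not part of the original model code): the tensors
# below are random stand-ins shaped as described in the docstring above, and the snippet
# assumes the module-level `device` and the torch / F imports defined near the top of this file.
if __name__ == "__main__":
    _bS, _Tmax, _mL = 2, 6, 12
    _score = torch.randn(_bS, _Tmax, _mL).to(device)  # [B, T, max_seq_length]
    _g_pnt_idxs = [[0, 3, 7, 1], [0, 2, 5, 9, 1]]       # gold pointer index sequences
    print(Loss_s2s(_score, _g_pnt_idxs))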
| [
"torch.mul",
"torch.log",
"torch.nn.Tanh",
"torch.nn.Softmax",
"torch.nn.LSTM",
"torch.stack",
"torch.nn.functional.binary_cross_entropy",
"torch.sigmoid",
"torch.nn.functional.sigmoid",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.functional.cross_entropy",
"torch.zeros",
"torch.zeros_like",
"torch.isnan",
"torch.nn.functional.softmax",
"torch.cat"
] | [((40296, 40311), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['s_wc'], {}), '(s_wc)\n', (40305, 40311), True, 'import torch.nn.functional as F\n'), ((40323, 40352), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['p', 'im'], {}), '(p, im)\n', (40345, 40352), True, 'import torch.nn.functional as F\n'), ((245, 270), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (268, 270), False, 'import torch\n'), ((4182, 4205), 'torch.nn.functional.softmax', 'F.softmax', (['s_sc'], {'dim': '(-1)'}), '(s_sc, dim=-1)\n', (4191, 4205), True, 'import torch.nn.functional as F\n'), ((13039, 13074), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (13048, 13074), True, 'import torch.nn as nn\n'), ((13094, 13129), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (13103, 13129), True, 'import torch.nn as nn\n'), ((13154, 13189), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (13163, 13189), True, 'import torch.nn as nn\n'), ((13297, 13314), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (13307, 13314), True, 'import torch.nn as nn\n'), ((13343, 13360), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (13353, 13360), True, 'import torch.nn as nn\n'), ((13390, 13408), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (13400, 13408), True, 'import torch.nn as nn\n'), ((16969, 17004), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (16978, 17004), True, 'import torch.nn as nn\n'), ((17266, 17283), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (17276, 17283), True, 'import torch.nn as nn\n'), ((17312, 17329), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (17322, 17329), True, 'import torch.nn as nn\n'), ((17359, 17377), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (17369, 17377), True, 'import torch.nn as nn\n'), ((20151, 20176), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (20160, 20176), True, 'import torch.nn as nn\n'), ((20201, 20248), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(num_layer * hidden_size)'], {}), '(hidden_size, num_layer * hidden_size)\n', (20210, 20248), True, 'import torch.nn as nn\n'), ((20271, 20318), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(num_layer * hidden_size)'], {}), '(hidden_size, num_layer * hidden_size)\n', (20280, 20318), True, 'import torch.nn as nn\n'), ((20343, 20368), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (20352, 20368), True, 'import torch.nn as nn\n'), ((20615, 20632), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (20625, 20632), True, 'import torch.nn as nn\n'), ((20661, 20678), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (20671, 20678), True, 'import torch.nn as nn\n'), ((20708, 20726), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (20718, 20726), True, 'import torch.nn as nn\n'), ((24649, 24684), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (24658, 24684), True, 'import torch.nn as nn\n'), ((24704, 24739), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (24713, 24739), True, 'import torch.nn as 
nn\n'), ((24764, 24799), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (24773, 24799), True, 'import torch.nn as nn\n'), ((24928, 24945), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (24938, 24945), True, 'import torch.nn as nn\n'), ((24974, 24991), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (24984, 24991), True, 'import torch.nn as nn\n'), ((25021, 25039), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (25031, 25039), True, 'import torch.nn as nn\n'), ((28380, 28415), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (28389, 28415), True, 'import torch.nn as nn\n'), ((28435, 28470), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (28444, 28470), True, 'import torch.nn as nn\n'), ((28495, 28530), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (28504, 28530), True, 'import torch.nn as nn\n'), ((28728, 28745), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (28738, 28745), True, 'import torch.nn as nn\n'), ((28774, 28791), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (28784, 28791), True, 'import torch.nn as nn\n'), ((28822, 28840), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (28832, 28840), True, 'import torch.nn as nn\n'), ((29948, 29975), 'torch.stack', 'torch.stack', (['wenc_header_ob'], {}), '(wenc_header_ob)\n', (29959, 29975), False, 'import torch\n'), ((32864, 32899), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (32873, 32899), True, 'import torch.nn as nn\n'), ((32919, 32954), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (32928, 32954), True, 'import torch.nn as nn\n'), ((32979, 33014), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (32988, 33014), True, 'import torch.nn as nn\n'), ((33035, 33069), 'torch.nn.Linear', 'nn.Linear', (['n_cond_ops', 'hidden_size'], {}), '(n_cond_ops, hidden_size)\n', (33044, 33069), True, 'import torch.nn as nn\n'), ((33645, 33662), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (33655, 33662), True, 'import torch.nn as nn\n'), ((33691, 33708), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (33701, 33708), True, 'import torch.nn as nn\n'), ((33739, 33757), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (33749, 33757), True, 'import torch.nn as nn\n'), ((34855, 34882), 'torch.stack', 'torch.stack', (['wenc_header_ob'], {}), '(wenc_header_ob)\n', (34866, 34882), False, 'import torch\n'), ((37231, 37251), 'torch.stack', 'torch.stack', (['wenc_op'], {}), '(wenc_op)\n', (37242, 37251), False, 'import torch\n'), ((37897, 37931), 'torch.cat', 'torch.cat', (['[vec1e, wenc_ne]'], {'dim': '(3)'}), '([vec1e, wenc_ne], dim=3)\n', (37906, 37931), False, 'import torch\n'), ((41324, 41369), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['s_wv[b, :g_wn1, :, 0]', 'g_st1'], {}), '(s_wv[b, :g_wn1, :, 0], g_st1)\n', (41339, 41369), True, 'import torch.nn.functional as F\n'), ((41484, 41529), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['s_wv[b, :g_wn1, :, 1]', 'g_ed1'], {}), '(s_wv[b, :g_wn1, :, 1], g_ed1)\n', (41499, 41529), True, 'import torch.nn.functional 
as F\n'), ((44088, 44209), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'max_seq_length', 'hidden_size': 'hidden_size', 'num_layers': 'num_layer', 'batch_first': '(True)', 'dropout': 'dropout'}), '(input_size=max_seq_length, hidden_size=hidden_size, num_layers=\n num_layer, batch_first=True, dropout=dropout)\n', (44095, 44209), True, 'import torch.nn as nn\n'), ((44293, 44327), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (44302, 44327), True, 'import torch.nn as nn\n'), ((44349, 44384), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (44358, 44384), True, 'import torch.nn as nn\n'), ((62898, 62921), 'torch.nn.functional.softmax', 'F.softmax', (['s_sc'], {'dim': '(-1)'}), '(s_sc, dim=-1)\n', (62907, 62921), True, 'import torch.nn.functional as F\n'), ((4992, 5015), 'torch.nn.functional.softmax', 'F.softmax', (['s_sa'], {'dim': '(-1)'}), '(s_sa, dim=-1)\n', (5001, 5015), True, 'import torch.nn.functional as F\n'), ((5453, 5475), 'torch.tensor', 'torch.tensor', (['prob_sca'], {}), '(prob_sca)\n', (5465, 5475), False, 'import torch\n'), ((9919, 9939), 'torch.tensor', 'torch.tensor', (['prob_w'], {}), '(prob_w)\n', (9931, 9939), False, 'import torch\n'), ((13226, 13235), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (13233, 13235), True, 'import torch.nn as nn\n'), ((13237, 13266), 'torch.nn.Linear', 'nn.Linear', (['(2 * hidden_size)', '(1)'], {}), '(2 * hidden_size, 1)\n', (13246, 13266), True, 'import torch.nn as nn\n'), ((17041, 17076), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (17050, 17076), True, 'import torch.nn as nn\n'), ((17114, 17123), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (17121, 17123), True, 'import torch.nn as nn\n'), ((17161, 17194), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'n_agg_ops'], {}), '(hidden_size, n_agg_ops)\n', (17170, 17194), True, 'import torch.nn as nn\n'), ((17459, 17494), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (17468, 17494), True, 'import torch.nn as nn\n'), ((17523, 17558), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (17532, 17558), True, 'import torch.nn as nn\n'), ((20405, 20440), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (20414, 20440), True, 'import torch.nn as nn\n'), ((20478, 20487), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (20485, 20487), True, 'import torch.nn as nn\n'), ((20525, 20562), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(self.mL_w + 1)'], {}), '(hidden_size, self.mL_w + 1)\n', (20534, 20562), True, 'import torch.nn as nn\n'), ((24848, 24857), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (24855, 24857), True, 'import torch.nn as nn\n'), ((24859, 24888), 'torch.nn.Linear', 'nn.Linear', (['(2 * hidden_size)', '(1)'], {}), '(2 * hidden_size, 1)\n', (24868, 24888), True, 'import torch.nn as nn\n'), ((27192, 27210), 'torch.isnan', 'torch.isnan', (['score'], {}), '(score)\n', (27203, 27210), False, 'import torch\n'), ((28580, 28619), 'torch.nn.Linear', 'nn.Linear', (['(2 * hidden_size)', 'hidden_size'], {}), '(2 * hidden_size, hidden_size)\n', (28589, 28619), True, 'import torch.nn as nn\n'), ((28631, 28640), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (28638, 28640), True, 'import torch.nn as nn\n'), ((28654, 28688), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 
'n_cond_ops'], {}), '(hidden_size, n_cond_ops)\n', (28663, 28688), True, 'import torch.nn as nn\n'), ((29770, 29793), 'torch.stack', 'torch.stack', (['(real + pad)'], {}), '(real + pad)\n', (29781, 29793), False, 'import torch\n'), ((34676, 34699), 'torch.stack', 'torch.stack', (['(real + pad)'], {}), '(real + pad)\n', (34687, 34699), False, 'import torch\n'), ((36686, 36725), 'torch.zeros', 'torch.zeros', (['self.mL_w', 'self.n_cond_ops'], {}), '(self.mL_w, self.n_cond_ops)\n', (36697, 36725), False, 'import torch\n'), ((40128, 40156), 'torch.zeros', 'torch.zeros', (['[bS, max_h_len]'], {}), '([bS, max_h_len])\n', (40139, 40156), False, 'import torch\n'), ((44422, 44431), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (44429, 44431), True, 'import torch.nn as nn\n'), ((44433, 44458), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (44442, 44458), True, 'import torch.nn as nn\n'), ((48037, 48062), 'torch.stack', 'torch.stack', (['s_wv_list', '(1)'], {}), '(s_wv_list, 1)\n', (48048, 48062), False, 'import torch\n'), ((63754, 63777), 'torch.nn.functional.softmax', 'F.softmax', (['s_sa'], {'dim': '(-1)'}), '(s_sa, dim=-1)\n', (63763, 63777), True, 'import torch.nn.functional as F\n'), ((64231, 64253), 'torch.tensor', 'torch.tensor', (['prob_sca'], {}), '(prob_sca)\n', (64243, 64253), False, 'import torch\n'), ((70430, 70450), 'torch.tensor', 'torch.tensor', (['prob_w'], {}), '(prob_w)\n', (70442, 70450), False, 'import torch\n'), ((4460, 4509), 'torch.zeros', 'torch.zeros', (['[bS, beam_size, self.number_agg_ops]'], {}), '([bS, beam_size, self.number_agg_ops])\n', (4471, 4509), False, 'import torch\n'), ((4540, 4568), 'torch.zeros_like', 'torch.zeros_like', (['prob_sc_sa'], {}), '(prob_sc_sa)\n', (4556, 4568), False, 'import torch\n'), ((26938, 26968), 'torch.mul', 'torch.mul', (['encoded_question', 'p'], {}), '(encoded_question, p)\n', (26947, 26968), False, 'import torch\n'), ((33198, 33227), 'torch.nn.Linear', 'nn.Linear', (['(4 * hidden_size)', '(2)'], {}), '(4 * hidden_size, 2)\n', (33207, 33227), True, 'import torch.nn as nn\n'), ((33313, 33352), 'torch.nn.Linear', 'nn.Linear', (['(4 * hidden_size)', 'hidden_size'], {}), '(4 * hidden_size, hidden_size)\n', (33322, 33352), True, 'import torch.nn as nn\n'), ((33370, 33379), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (33377, 33379), True, 'import torch.nn as nn\n'), ((33397, 33422), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(2)'], {}), '(hidden_size, 2)\n', (33406, 33422), True, 'import torch.nn as nn\n'), ((37106, 37131), 'torch.tensor', 'torch.tensor', (['idx_scatter'], {}), '(idx_scatter)\n', (37118, 37131), False, 'import torch\n'), ((39771, 39789), 'torch.tensor', 'torch.tensor', (['g_sc'], {}), '(g_sc)\n', (39783, 39789), False, 'import torch\n'), ((39878, 39896), 'torch.tensor', 'torch.tensor', (['g_sa'], {}), '(g_sa)\n', (39890, 39896), False, 'import torch\n'), ((39984, 40002), 'torch.tensor', 'torch.tensor', (['g_wn'], {}), '(g_wn)\n', (39996, 40002), False, 'import torch\n'), ((45055, 45106), 'torch.zeros', 'torch.zeros', (['[self.num_layer, bS, self.hidden_size]'], {}), '([self.num_layer, bS, self.hidden_size])\n', (45066, 45106), False, 'import torch\n'), ((45132, 45183), 'torch.zeros', 'torch.zeros', (['[self.num_layer, bS, self.hidden_size]'], {}), '([self.num_layer, bS, self.hidden_size])\n', (45143, 45183), False, 'import torch\n'), ((48925, 48976), 'torch.zeros', 'torch.zeros', (['[self.num_layer, bS, self.hidden_size]'], {}), '([self.num_layer, bS, self.hidden_size])\n', 
(48936, 48976), False, 'import torch\n'), ((49002, 49053), 'torch.zeros', 'torch.zeros', (['[self.num_layer, bS, self.hidden_size]'], {}), '([self.num_layer, bS, self.hidden_size])\n', (49013, 49053), False, 'import torch\n'), ((51507, 51522), 'torch.log', 'torch.log', (['prob'], {}), '(prob)\n', (51516, 51522), False, 'import torch\n'), ((57336, 57367), 'torch.zeros', 'torch.zeros', (['bS', 'max_header_len'], {}), '(bS, max_header_len)\n', (57347, 57367), False, 'import torch\n'), ((57886, 57919), 'torch.zeros', 'torch.zeros', (['[bS, self.n_agg_ops]'], {}), '([bS, self.n_agg_ops])\n', (57897, 57919), False, 'import torch\n'), ((58172, 58209), 'torch.zeros', 'torch.zeros', (['bS', '(self.n_where_num + 1)'], {}), '(bS, self.n_where_num + 1)\n', (58183, 58209), False, 'import torch\n'), ((58417, 58451), 'torch.zeros', 'torch.zeros', (['bS', 'max_header_len', '(1)'], {}), '(bS, max_header_len, 1)\n', (58428, 58451), False, 'import torch\n'), ((58823, 58875), 'torch.zeros', 'torch.zeros', (['[bS, self.n_where_num, self.n_cond_ops]'], {}), '([bS, self.n_where_num, self.n_cond_ops])\n', (58834, 58875), False, 'import torch\n'), ((63176, 63220), 'torch.zeros', 'torch.zeros', (['[bS, beam_size, self.n_agg_ops]'], {}), '([bS, beam_size, self.n_agg_ops])\n', (63187, 63220), False, 'import torch\n'), ((63254, 63298), 'torch.zeros', 'torch.zeros', (['[bS, beam_size, self.n_agg_ops]'], {}), '([bS, beam_size, self.n_agg_ops])\n', (63265, 63298), False, 'import torch\n'), ((63330, 63358), 'torch.zeros_like', 'torch.zeros_like', (['prob_sc_sa'], {}), '(prob_sc_sa)\n', (63346, 63358), False, 'import torch\n'), ((40622, 40641), 'torch.tensor', 'torch.tensor', (['g_wo1'], {}), '(g_wo1)\n', (40634, 40641), False, 'import torch\n'), ((41167, 41187), 'torch.tensor', 'torch.tensor', (['g_wvi1'], {}), '(g_wvi1)\n', (41179, 41187), False, 'import torch\n'), ((45628, 45664), 'torch.zeros', 'torch.zeros', (['bS', 'self.Tmax', 'mL_input'], {}), '(bS, self.Tmax, mL_input)\n', (45639, 45664), False, 'import torch\n'), ((74700, 74729), 'torch.tensor', 'torch.tensor', (['g_pnt_idxs1[1:]'], {}), '(g_pnt_idxs1[1:])\n', (74712, 74729), False, 'import torch\n'), ((47738, 47763), 'torch.zeros', 'torch.zeros', (['bS', 'mL_input'], {}), '(bS, mL_input)\n', (47749, 47763), False, 'import torch\n'), ((55922, 55947), 'torch.zeros', 'torch.zeros', (['bS', 'mL_input'], {}), '(bS, mL_input)\n', (55933, 55947), False, 'import torch\n'), ((56159, 56182), 'torch.tensor', 'torch.tensor', (['idx_batch'], {}), '(idx_batch)\n', (56171, 56182), False, 'import torch\n'), ((7033, 7056), 'torch.nn.functional.softmax', 'F.softmax', (['s_wn'], {'dim': '(-1)'}), '(s_wn, dim=-1)\n', (7042, 7056), True, 'import torch.nn.functional as F\n'), ((7327, 7342), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['s_wc'], {}), '(s_wc)\n', (7336, 7342), True, 'import torch.nn.functional as F\n'), ((8063, 8090), 'torch.nn.functional.softmax', 'F.softmax', (['s_wo_max'], {'dim': '(-1)'}), '(s_wo_max, dim=-1)\n', (8072, 8090), True, 'import torch.nn.functional as F\n'), ((66542, 66565), 'torch.nn.functional.softmax', 'F.softmax', (['s_wn'], {'dim': '(-1)'}), '(s_wn, dim=-1)\n', (66551, 66565), True, 'import torch.nn.functional as F\n'), ((66822, 66841), 'torch.sigmoid', 'torch.sigmoid', (['s_wc'], {}), '(s_wc)\n', (66835, 66841), False, 'import torch\n'), ((67546, 67573), 'torch.nn.functional.softmax', 'F.softmax', (['s_wo_max'], {'dim': '(-1)'}), '(s_wo_max, dim=-1)\n', (67555, 67573), True, 'import torch.nn.functional as F\n'), ((8538, 8561), 
'torch.nn.functional.softmax', 'F.softmax', (['s_wv'], {'dim': '(-2)'}), '(s_wv, dim=-2)\n', (8547, 8561), True, 'import torch.nn.functional as F\n'), ((68061, 68084), 'torch.nn.functional.softmax', 'F.softmax', (['s_wv'], {'dim': '(-2)'}), '(s_wv, dim=-2)\n', (68070, 68084), True, 'import torch.nn.functional as F\n')] |
import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
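def _frame_apply_example():
    # Illustrative sketch only, not part of pandas: shows how ``frame_apply`` picks
    # FrameRowApply for axis=0 (apply func to each column) and FrameColumnApply for
    # axis=1 (apply func to each row). The DataFrame below is a made-up stand-in and
    # only public pandas behaviour is assumed; the helper itself is never called here.
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    col_op = frame_apply(df, func=lambda s: s.sum(), axis=0)  # -> FrameRowApply
    row_op = frame_apply(df, func=lambda s: s.sum(), axis=1)  # -> FrameColumnApply
    # Expected: per-column sums (a=3, b=7) and per-row sums (4, 6), each as a Series.
    return col_op.get_result(), row_op.get_result()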
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._data.apply("apply", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, copy=False
)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([]))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_apply
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_apply_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.append(v.name)
successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super().apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super().apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
| [
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.reduction.compute_reduction",
"pandas.Series",
"pandas.core.dtypes.common.is_sequence",
"numpy.asarray",
"inspect.getfullargspec",
"numpy.errstate",
"numpy.apply_along_axis",
"numpy.empty_like",
"pandas.core.dtypes.common.is_dict_like"
] | [((5381, 5409), 'numpy.empty_like', 'np.empty_like', (['target.values'], {}), '(target.values)\n', (5394, 5409), True, 'import numpy as np\n'), ((2193, 2213), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['self.f'], {}), '(self.f)\n', (2205, 2213), False, 'from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_sequence\n'), ((2217, 2237), 'pandas.core.dtypes.common.is_dict_like', 'is_dict_like', (['self.f'], {}), '(self.f)\n', (2229, 2237), False, 'from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_sequence\n'), ((2764, 2792), 'inspect.getfullargspec', 'inspect.getfullargspec', (['func'], {}), '(func)\n', (2786, 2792), False, 'import inspect\n'), ((4742, 4809), 'pandas._libs.reduction.compute_reduction', 'libreduction.compute_reduction', (['self.values', 'self.f'], {'axis': 'self.axis'}), '(self.values, self.f, axis=self.axis)\n', (4772, 4809), True, 'from pandas._libs import reduction as libreduction\n'), ((9164, 9187), 'pandas.core.dtypes.common.is_sequence', 'is_sequence', (['results[0]'], {}), '(results[0])\n', (9175, 9187), False, 'from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_sequence\n'), ((5023, 5074), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.f', 'self.axis', 'self.values'], {}), '(self.f, self.axis, self.values)\n', (5042, 5074), True, 'import numpy as np\n'), ((5610, 5625), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (5620, 5625), True, 'import numpy as np\n'), ((7224, 7318), 'pandas._libs.reduction.compute_reduction', 'libreduction.compute_reduction', (['values', 'self.f'], {'axis': 'self.axis', 'dummy': 'dummy', 'labels': 'labels'}), '(values, self.f, axis=self.axis, dummy=dummy,\n labels=labels)\n', (7254, 7318), True, 'from pandas._libs import reduction as libreduction\n'), ((11205, 11220), 'pandas.Series', 'Series', (['results'], {}), '(results)\n', (11211, 11220), False, 'from pandas import Series\n'), ((3000, 3025), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (3011, 3025), True, 'import numpy as np\n'), ((4222, 4232), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (4228, 4232), False, 'from pandas import Series\n'), ((4450, 4460), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (4456, 4460), False, 'from pandas import Series\n')] |
import os.path
from collections import Counter
import pytest
INPUT_TXT = os.path.join(os.path.dirname(__file__), 'input.txt')
def compute(s: str) -> int:
lines = s.splitlines()
numbers = Counter(int(f) for f in lines[0].split(","))
for d in range(80):
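        # Day update: every fish with timer 0 spawns a new fish (timer 8) and resets to
        # timer 6; every other timer simply shifts down by one. Tracking only the count of
        # fish per timer value keeps the state at nine integers no matter how many fish exist.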
numbers2 = Counter({8: numbers[0], 6: numbers[0]})
for k, v in numbers.items():
if k >= 1:
numbers2[k - 1] += v
numbers = numbers2
return sum(numbers.values())
INPUT_S = '''\
3,4,3,1,2
'''
EXPECTED = 5934
@pytest.mark.parametrize(
('input_s', 'expected'),
(
(INPUT_S, EXPECTED),
),
)
def test(input_s: str, expected: int) -> None:
assert compute(input_s) == expected
def main() -> int:
with open(INPUT_TXT, "r") as f:
print(compute(f.read()))
return 0
if __name__ == '__main__':
raise SystemExit(main())
| [
"collections.Counter",
"pytest.mark.parametrize"
] | [((535, 607), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('input_s', 'expected')", '((INPUT_S, EXPECTED),)'], {}), "(('input_s', 'expected'), ((INPUT_S, EXPECTED),))\n", (558, 607), False, 'import pytest\n'), ((288, 331), 'collections.Counter', 'Counter', (['{(8): numbers[0], (6): numbers[0]}'], {}), '({(8): numbers[0], (6): numbers[0]})\n', (295, 331), False, 'from collections import Counter\n')] |
from pathlib import Path
root = Path(__file__).parent.absolute()
import envo
envo.add_source_roots([root])
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from envo import Env, Namespace, env_var, logger, run
from env_comm import StickybeakCommEnv as ParentEnv
p = Namespace("p")
class StickybeakCiEnv(ParentEnv):
class Meta(ParentEnv.Meta):
stage: str = "ci"
emoji: str = "⚙"
load_env_vars = True
class Environ(ParentEnv.Environ):
pypi_username: Optional[str] = env_var(raw=True)
pypi_password: Optional[str] = env_var(raw=True)
e: Environ
def init(self) -> None:
super().init()
@p.command
def bootstrap(self, test_apps=True) -> None:
super().bootstrap(test_apps)
@p.command
def test(self) -> None:
run("pytest --reruns 2 -v tests")
@p.command
def build(self) -> None:
run("poetry build")
@p.command
def publish(self) -> None:
run(f'poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}"', verbose=False)
@p.command
def rstcheck(self) -> None:
pass
# run("rstcheck README.rst | tee ./workspace/rstcheck.txt")
@p.command
def flake(self) -> None:
pass
# run("flake8 . | tee ./workspace/flake8.txt")
@p.command
def check_black(self) -> None:
run("black --check .")
@p.command
def check_isort(self) -> None:
run("black --check .")
@p.command
def mypy(self) -> None:
run("mypy .")
@p.command
def generate_version(self) -> None:
import toml
config = toml.load(str(self.meta.root / "pyproject.toml"))
version: str = config["tool"]["poetry"]["version"]
version_file = self.meta.root / "stickybeak/__version__.py"
Path(version_file).touch()
version_file.write_text(f'__version__ = "{version}"\n')
ThisEnv = StickybeakCiEnv
| [
"envo.Namespace",
"pathlib.Path",
"envo.run",
"envo.add_source_roots",
"envo.env_var"
] | [((80, 109), 'envo.add_source_roots', 'envo.add_source_roots', (['[root]'], {}), '([root])\n', (101, 109), False, 'import envo\n'), ((301, 315), 'envo.Namespace', 'Namespace', (['"""p"""'], {}), "('p')\n", (310, 315), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((542, 559), 'envo.env_var', 'env_var', ([], {'raw': '(True)'}), '(raw=True)\n', (549, 559), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((599, 616), 'envo.env_var', 'env_var', ([], {'raw': '(True)'}), '(raw=True)\n', (606, 616), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((839, 872), 'envo.run', 'run', (['"""pytest --reruns 2 -v tests"""'], {}), "('pytest --reruns 2 -v tests')\n", (842, 872), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((926, 945), 'envo.run', 'run', (['"""poetry build"""'], {}), "('poetry build')\n", (929, 945), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((1001, 1115), 'envo.run', 'run', (['f"""poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}\\""""'], {'verbose': '(False)'}), '(f\'poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}"\'\n , verbose=False)\n', (1004, 1115), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((1412, 1434), 'envo.run', 'run', (['"""black --check ."""'], {}), "('black --check .')\n", (1415, 1434), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((1494, 1516), 'envo.run', 'run', (['"""black --check ."""'], {}), "('black --check .')\n", (1497, 1516), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((1582, 1595), 'envo.run', 'run', (['"""mypy ."""'], {}), "('mypy .')\n", (1585, 1595), False, 'from envo import Env, Namespace, env_var, logger, run\n'), ((33, 47), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (37, 47), False, 'from pathlib import Path\n'), ((1876, 1894), 'pathlib.Path', 'Path', (['version_file'], {}), '(version_file)\n', (1880, 1894), False, 'from pathlib import Path\n')] |
import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
from mkdist import bsp_copy_files
import rtconfig
library_dir = os.path.join(dist_dir, 'libraries')
print("=> copy nrf52 bsp libraries")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
bsp_copy_files(library_path, library_dir)
| [
"mkdist.bsp_copy_files",
"os.path.dirname",
"os.path.join",
"os.getcwd"
] | [((46, 57), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (55, 57), False, 'import os\n'), ((278, 313), 'os.path.join', 'os.path.join', (['dist_dir', '"""libraries"""'], {}), "(dist_dir, 'libraries')\n", (290, 313), False, 'import os\n'), ((433, 474), 'mkdist.bsp_copy_files', 'bsp_copy_files', (['library_path', 'library_dir'], {}), '(library_path, library_dir)\n', (447, 474), False, 'from mkdist import bsp_copy_files\n'), ((87, 112), 'os.path.dirname', 'os.path.dirname', (['cwd_path'], {}), '(cwd_path)\n', (102, 112), False, 'import os\n'), ((388, 413), 'os.path.dirname', 'os.path.dirname', (['BSP_ROOT'], {}), '(BSP_ROOT)\n', (403, 413), False, 'import os\n')] |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module contains utilities for using multi-methods in
spack. You can think of multi-methods like overloaded methods --
they're methods with the same name, and we need to select a version
of the method based on some criteria. e.g., for overloaded
methods, you would select a version of the method to call based on
the types of its arguments.
In spack, multi-methods are used to ease the life of package
authors. They allow methods like install() (or other methods
called by install()) to declare multiple versions to be called when
the package is instantiated with different specs. e.g., if the
package is built with OpenMPI on x86_64, you might want to call a
different install method than if it was built for mpich2 on
BlueGene/Q. Likewise, you might want to do a different type of
install for different versions of the package.
Multi-methods provide a simple decorator-based syntax for this that
avoids overly complicated rat nests of if statements. Obviously,
depending on the scenario, regular old conditionals might be clearer,
so package authors should use their judgement.
"""
import functools
import inspect
from llnl.util.lang import caller_locals
import spack.architecture
import spack.error
from spack.spec import Spec
class MultiMethodMeta(type):
"""This allows us to track the class's dict during instantiation."""
#: saved dictionary of attrs on the class being constructed
_locals = None
@classmethod
def __prepare__(cls, name, bases, **kwargs):
"""Save the dictionary that will be used for the class namespace."""
MultiMethodMeta._locals = dict()
return MultiMethodMeta._locals
def __init__(cls, name, bases, attr_dict):
"""Clear out the cached locals dict once the class is built."""
MultiMethodMeta._locals = None
super(MultiMethodMeta, cls).__init__(name, bases, attr_dict)
class SpecMultiMethod(object):
"""This implements a multi-method for Spack specs. Packages are
instantiated with a particular spec, and you may want to
execute different versions of methods based on what the spec
looks like. For example, you might want to call a different
version of install() for one platform than you call on another.
The SpecMultiMethod class implements a callable object that
handles method dispatch. When it is called, it looks through
registered methods and their associated specs, and it tries
to find one that matches the package's spec. If it finds one
(and only one), it will call that method.
This is intended for use with decorators (see below). The
decorator (see docs below) creates SpecMultiMethods and
registers method versions with them.
To register a method, you can do something like this:
mm = SpecMultiMethod()
mm.register("^chaos_5_x86_64_ib", some_method)
The object registered needs to be a Spec or some string that
will parse to be a valid spec.
When the mm is actually called, it selects a version of the
method to call based on the sys_type of the object it is
called on.
See the docs for decorators below for more details.
"""
def __init__(self, default=None):
self.method_list = []
self.default = default
if default:
functools.update_wrapper(self, default)
def register(self, spec, method):
"""Register a version of a method for a particular spec."""
self.method_list.append((spec, method))
if not hasattr(self, '__name__'):
functools.update_wrapper(self, method)
else:
assert(self.__name__ == method.__name__)
def __get__(self, obj, objtype):
"""This makes __call__ support instance methods."""
# Method_list is a list of tuples (constraint, method)
# Here we are going to assume that we have at least one
# element in the list. The first registered function
# will be the one 'wrapped'.
wrapped_method = self.method_list[0][1]
# Call functools.wraps manually to get all the attributes
# we need to be disguised as the wrapped_method
func = functools.wraps(wrapped_method)(
functools.partial(self.__call__, obj)
)
return func
def _get_method_by_spec(self, spec):
"""Find the method of this SpecMultiMethod object that satisfies the
given spec, if one exists
"""
for condition, method in self.method_list:
if spec.satisfies(condition):
return method
return self.default or None
def __call__(self, package_self, *args, **kwargs):
"""Find the first method with a spec that matches the
package's spec. If none is found, call the default
or if there is none, then raise a NoSuchMethodError.
"""
spec_method = self._get_method_by_spec(package_self.spec)
if spec_method:
return spec_method(package_self, *args, **kwargs)
        # Unwrap the MRO of `package_self` by hand. Note that we can't
# use `super()` here, because using `super()` recursively
# requires us to know the class of `package_self`, as well as
# its superclasses for successive calls. We don't have that
# information within `SpecMultiMethod`, because it is not
# associated with the package class.
for cls in inspect.getmro(package_self.__class__)[1:]:
superself = cls.__dict__.get(self.__name__, None)
if isinstance(superself, SpecMultiMethod):
# Check parent multimethod for method for spec.
superself_method = superself._get_method_by_spec(
package_self.spec
)
if superself_method:
return superself_method(package_self, *args, **kwargs)
elif superself:
return superself(package_self, *args, **kwargs)
raise NoSuchMethodError(
type(package_self), self.__name__, package_self.spec,
[m[0] for m in self.method_list]
)
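# Illustrative sketch (added annotation, not part of the original module): how a
# package's spec selects a method. Registrations are tried in declaration order
# and the first matching condition wins; the undecorated definition, if any,
# becomes the fallback default:
#
#   mm = SpecMultiMethod(default=default_install)
#   mm.register('^mpich', install_mpich)
#   mm.register('^openmpi', install_openmpi)
#   mm._get_method_by_spec(spec)  # -> install_openmpi for a spec satisfying '^openmpi';
#                                 #    default_install when no condition matches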
class when(object):
"""This annotation lets packages declare multiple versions of
methods like install() that depend on the package's spec.
For example:
.. code-block:: python
class SomePackage(Package):
...
def install(self, prefix):
# Do default install
@when('target=x86_64:')
def install(self, prefix):
# This will be executed instead of the default install if
# the package's target is in the x86_64 family.
@when('target=ppc64:')
def install(self, prefix):
# This will be executed if the package's target is in
# the ppc64 family
This allows each package to have a default version of install() AND
specialized versions for particular platforms. The version that is
    called depends on the architecture of the instantiated package.
Note that this works for methods other than install, as well. So,
if you only have part of the install that is platform specific, you
could do this:
.. code-block:: python
class SomePackage(Package):
...
# virtual dependence on MPI.
# could resolve to mpich, mpich2, OpenMPI
depends_on('mpi')
def setup(self):
# do nothing in the default case
pass
@when('^openmpi')
def setup(self):
# do something special when this is built with OpenMPI for
# its MPI implementations.
def install(self, prefix):
# Do common install stuff
self.setup()
# Do more common install stuff
Note that the default version of decorated methods must
*always* come first. Otherwise it will override all of the
platform-specific versions. There's not much we can do to get
around this because of the way decorators work.
"""
def __init__(self, condition):
if isinstance(condition, bool):
self.spec = Spec() if condition else None
else:
self.spec = Spec(condition)
def __call__(self, method):
# In Python 2, Get the first definition of the method in the
# calling scope by looking at the caller's locals. In Python 3,
# we handle this using MultiMethodMeta.__prepare__.
if MultiMethodMeta._locals is None:
MultiMethodMeta._locals = caller_locals()
# Create a multimethod with this name if there is not one already
original_method = MultiMethodMeta._locals.get(method.__name__)
if not type(original_method) == SpecMultiMethod:
original_method = SpecMultiMethod(original_method)
if self.spec is not None:
original_method.register(self.spec, method)
return original_method
class MultiMethodError(spack.error.SpackError):
"""Superclass for multimethod dispatch errors"""
def __init__(self, message):
super(MultiMethodError, self).__init__(message)
class NoSuchMethodError(spack.error.SpackError):
"""Raised when we can't find a version of a multi-method."""
def __init__(self, cls, method_name, spec, possible_specs):
super(NoSuchMethodError, self).__init__(
"Package %s does not support %s called with %s. Options are: %s"
% (cls.__name__, method_name, spec,
", ".join(str(s) for s in possible_specs)))
| [
"spack.spec.Spec",
"llnl.util.lang.caller_locals",
"functools.wraps",
"functools.partial",
"inspect.getmro",
"functools.update_wrapper"
] | [((3566, 3605), 'functools.update_wrapper', 'functools.update_wrapper', (['self', 'default'], {}), '(self, default)\n', (3590, 3605), False, 'import functools\n'), ((3816, 3854), 'functools.update_wrapper', 'functools.update_wrapper', (['self', 'method'], {}), '(self, method)\n', (3840, 3854), False, 'import functools\n'), ((4431, 4462), 'functools.wraps', 'functools.wraps', (['wrapped_method'], {}), '(wrapped_method)\n', (4446, 4462), False, 'import functools\n'), ((4476, 4513), 'functools.partial', 'functools.partial', (['self.__call__', 'obj'], {}), '(self.__call__, obj)\n', (4493, 4513), False, 'import functools\n'), ((5684, 5722), 'inspect.getmro', 'inspect.getmro', (['package_self.__class__'], {}), '(package_self.__class__)\n', (5698, 5722), False, 'import inspect\n'), ((8646, 8661), 'spack.spec.Spec', 'Spec', (['condition'], {}), '(condition)\n', (8650, 8661), False, 'from spack.spec import Spec\n'), ((8978, 8993), 'llnl.util.lang.caller_locals', 'caller_locals', ([], {}), '()\n', (8991, 8993), False, 'from llnl.util.lang import caller_locals\n'), ((8578, 8584), 'spack.spec.Spec', 'Spec', ([], {}), '()\n', (8582, 8584), False, 'from spack.spec import Spec\n')] |
"""
Create diaries in A5 and A4 sizes based on PDF templates.
<NAME>
"""
import datetime
import math
import sys
from io import BytesIO
from pathlib import Path
from PyPDF2 import PdfFileReader, PdfFileWriter
from reportlab.lib.pagesizes import A5, A4
from reportlab.lib.utils import ImageReader
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from reportlab.pdfgen import canvas
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', Path(__file__).resolve().parent)
return base_path / Path(relative_path)
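# Note (added annotation): sys._MEIPASS is the temporary directory PyInstaller
# unpacks bundled data files into at runtime; in a normal checkout we fall back
# to the directory containing this script.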
CORNER_DIR = resource_path("input/1_diaries_to_create/resources")
LOGO_PATH = resource_path(CORNER_DIR / Path("logo.png"))
DEFAULT_FONT = resource_path(CORNER_DIR / Path('FreeSansLocal.ttf'))
CREATED_DIARIES_DIR = resource_path("output/created_diaries/")
#############################################################
#############################################################
#############################################################
##### Algorithm to convert A4 pages into an A5 booklet ######
#############################################################
#############################################################
#############################################################
## Adapted from the work by <NAME>, https://bitbucket.org/spookylukey/booklet-maker/src
class Sheet(object):
'''A4 Sheets'''
def __init__(self):
self.front = PrintPage()
self.back = PrintPage()
class PrintPage(object):
'''A4 page with containers for A4 pages'''
def __init__(self):
self.left = PageContainer()
self.right = PageContainer()
class PageContainer(object):
'''A5 containers'''
def __init__(self):
self.page = None
def build_booklet(pages):
''' Build booklet '''
# Double sized page, with double-sided printing, fits 4 of the original.
sheet_count = int(math.ceil(len(pages) / 4.0))
booklet = [Sheet() for i in range(0, sheet_count)]
# Assign input pages to sheets
# This is the core algo. To understand it:
# * pick up 3 A4 sheets, landscape
# * number the sheets from 1 to 3, starting with bottom one
# * fold the stack in the middle to form an A5 booklet
# * work out what order you need to use the front left,
# front right, back left and back right sides.
def containers():
'''Yields parts of the booklet in the order they should be used.'''
for sheet in booklet:
yield sheet.back.right
yield sheet.front.left
for sheet in reversed(booklet):
yield sheet.front.right
yield sheet.back.left
for container, page in zip(containers(), pages):
container.page = page
return booklet
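# Worked example (annotation, not in the original source): with 8 input pages
# p1..p8 the zip above fills two sheets as
#   sheet 1: front = [p2 | p7], back = [p8 | p1]
#   sheet 2: front = [p4 | p5], back = [p6 | p3]
# i.e. the standard saddle-stitch imposition -- print double-sided, stack, fold
# in the middle, and the pages read p1..p8 in order.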
def add_double_page(writer, page_size, print_page):
''' Adds a double page '''
width, height = page_size
page = writer.insertBlankPage(width=width, height=height, index=writer.getNumPages())
# Merge the left page
l_page = print_page.left.page
if l_page is not None:
page.mergePage(l_page)
# Merge the right page with translation
r_page = print_page.right.page
if r_page is not None:
page.mergeTranslatedPage(r_page, width / 2, 0)
def convert_to_a5_booklet(input_file, blanks=0):
'''Converts a PDF into a double sided A5 file to print as an A4 (two A5 pages per A4 page)'''
# Create internal dir to save the a5 files
a5_booklets_dir = CREATED_DIARIES_DIR
Path.mkdir(a5_booklets_dir, parents=True, exist_ok=True)
# Create the a5 booklet's name
a5_booklet_name = Path(input_file).stem + "_as_a5_booklet"
a5_booklet = a5_booklets_dir / Path("{}.pdf".format(a5_booklet_name))
reader = PdfFileReader(open(input_file, "rb"))
pages = [reader.getPage(p) for p in range(0, reader.getNumPages())]
for index in range(0, blanks):
pages.insert(0, None)
sheets = build_booklet(pages)
writer = PdfFileWriter()
    first_page = reader.getPage(0)
    input_width = first_page.mediaBox.getWidth()
    output_width = input_width * 2
    input_height = first_page.mediaBox.getHeight()
output_height = input_height
page_size = (output_width, output_height)
# We want to group fronts and backs together.
for sheet in sheets:
add_double_page(writer, page_size, sheet.back)
add_double_page(writer, page_size, sheet.front)
with open(a5_booklet, "wb") as a5_booklet_stream:
writer.write(a5_booklet_stream)
return a5_booklet
#############################################################
#############################################################
#############################################################
########## Create A4 paper diary ############
#############################################################
#############################################################
#############################################################
def create_diary_cover(participant_id, email, font):
'''Create cover of the A5 diary'''
packet = BytesIO()
cover_canvas = canvas.Canvas(packet, pagesize=A4)
width, height = A4
# Centering the logo or participant ID
if Path.exists(LOGO_PATH):
logo = ImageReader(LOGO_PATH)
cover_canvas.drawImage(logo, x=(width * (1/6.0)),
y=(height/4),
width=width * (4/6.0),
preserveAspectRatio=True,
mask='auto')
else:
cover_canvas.setFont(font, 50)
cover_canvas.drawCentredString(width/2, height/2, participant_id)
# Lost legend
if not (email is None or email == ""):
cover_canvas.setFont(font, 15)
cover_canvas.drawCentredString(width/2, 50,
"If you find this document, please email " + email)
cover_canvas.save()
packet.seek(0)
return PdfFileReader(packet).getPage(0)
def create_diary_page(pdf_template, font, top_left_text, page_number, top_right_text):
packet = BytesIO()
diary_canvas = canvas.Canvas(packet, pagesize=A5)
# Header
diary_canvas.setFont(font, 11)
#diary_canvas.drawRightString(378, 562, str(top_right_text))
diary_canvas.drawString(36.5, 562, top_left_text)
# Corners
corners = [(CORNER_DIR / Path("corner_ul.png"), 25, 553),
(CORNER_DIR / Path("corner_ur.png"), 365, 553),
(CORNER_DIR / Path("corner_bl.png"), 25, 15),
(CORNER_DIR / Path("corner_br.png"), 365, 15)]
for corner_path, x, y in corners:
if corner_path.exists():
corner = ImageReader(corner_path)
diary_canvas.drawImage(corner, x=x, y=y, mask='auto')
# Footer
#diary_canvas.setFont(font, 8)
#diary_canvas.drawString(36.5, 24, str(page_number))
diary_canvas.save()
# Merge template and additions (header, corners and footer)
packet.seek(0)
page_additions = PdfFileReader(packet).getPage(0)
new_page = PdfFileReader(open(pdf_template, "rb")).getPage(0)
new_page.mergePage(page_additions)
new_page.scaleTo(A4[0], A4[1])
return new_page
def create_a4_diary(pdf_template, pages, top_left_text, email=None, font='Arial'):
"""Creates an A4 document with [PAGES] from [STARTING_DATE]"""
starting_date = parse_date(top_left_text)
font = set_active_font(font)
# Create output folder/file
if not Path(pdf_template).exists():
raise ValueError("Template does not exist {}".format(pdf_template))
Path.mkdir(CREATED_DIARIES_DIR, parents=True, exist_ok=True)
a4_document_name = Path(pdf_template).stem
a4_document_path = CREATED_DIARIES_DIR / Path("{}_document.pdf".format(a4_document_name))
pdf_file = PdfFileWriter()
# Cover
pdf_file.addPage(create_diary_cover(a4_document_name, email, font))
pdf_file.addBlankPage()
# Pages
for page in range(1, pages+1):
if starting_date is not None:
top_left_text = starting_date.strftime('%A, %d %b %Y')
starting_date += datetime.timedelta(days=1)
        new_page = create_diary_page(pdf_template, font, top_left_text, page, a4_document_name)
pdf_file.addPage(new_page)
# Backcover
pdf_file.addBlankPage()
# Save a4 document
with open(a4_document_path, "wb") as output_stream:
pdf_file.write(output_stream)
return a4_document_path
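# Typical end-to-end use (sketch; the template name and page count below are
# hypothetical, not taken from this file):
#
#   a4_path = create_a4_diary('template.pdf', pages=31, top_left_text='01/01/2021')
#   convert_to_a5_booklet(a4_path)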
def set_active_font(font):
"""Register the font to use in header and footer of the diary"""
try:
pdfmetrics.registerFont(TTFont(font, font + '.ttf'))
except TTFError:
font = 'FreeSansLocal'
pdfmetrics.registerFont(TTFont(font, DEFAULT_FONT))
return font
def parse_date(s):
try:
return datetime.datetime.strptime(s, "%d/%m/%Y")
except ValueError:
        return None
| [
"pathlib.Path.exists",
"pathlib.Path",
"datetime.datetime.strptime",
"reportlab.pdfbase.ttfonts.TTFont",
"io.BytesIO",
"pathlib.Path.mkdir",
"reportlab.pdfgen.canvas.Canvas",
"PyPDF2.PdfFileWriter",
"datetime.timedelta",
"PyPDF2.PdfFileReader",
"reportlab.lib.utils.ImageReader"
] | [((3588, 3644), 'pathlib.Path.mkdir', 'Path.mkdir', (['a5_booklets_dir'], {'parents': '(True)', 'exist_ok': '(True)'}), '(a5_booklets_dir, parents=True, exist_ok=True)\n', (3598, 3644), False, 'from pathlib import Path\n'), ((4056, 4071), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (4069, 4071), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((5149, 5158), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (5156, 5158), False, 'from io import BytesIO\n'), ((5178, 5212), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['packet'], {'pagesize': 'A4'}), '(packet, pagesize=A4)\n', (5191, 5212), False, 'from reportlab.pdfgen import canvas\n'), ((5287, 5309), 'pathlib.Path.exists', 'Path.exists', (['LOGO_PATH'], {}), '(LOGO_PATH)\n', (5298, 5309), False, 'from pathlib import Path\n'), ((6156, 6165), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6163, 6165), False, 'from io import BytesIO\n'), ((6185, 6219), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['packet'], {'pagesize': 'A5'}), '(packet, pagesize=A5)\n', (6198, 6219), False, 'from reportlab.pdfgen import canvas\n'), ((7657, 7717), 'pathlib.Path.mkdir', 'Path.mkdir', (['CREATED_DIARIES_DIR'], {'parents': '(True)', 'exist_ok': '(True)'}), '(CREATED_DIARIES_DIR, parents=True, exist_ok=True)\n', (7667, 7717), False, 'from pathlib import Path\n'), ((7875, 7890), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (7888, 7890), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((639, 658), 'pathlib.Path', 'Path', (['relative_path'], {}), '(relative_path)\n', (643, 658), False, 'from pathlib import Path\n'), ((765, 781), 'pathlib.Path', 'Path', (['"""logo.png"""'], {}), "('logo.png')\n", (769, 781), False, 'from pathlib import Path\n'), ((825, 850), 'pathlib.Path', 'Path', (['"""FreeSansLocal.ttf"""'], {}), "('FreeSansLocal.ttf')\n", (829, 850), False, 'from pathlib import Path\n'), ((5326, 5348), 'reportlab.lib.utils.ImageReader', 'ImageReader', (['LOGO_PATH'], {}), '(LOGO_PATH)\n', (5337, 5348), False, 'from reportlab.lib.utils import ImageReader\n'), ((7741, 7759), 'pathlib.Path', 'Path', (['pdf_template'], {}), '(pdf_template)\n', (7745, 7759), False, 'from pathlib import Path\n'), ((8882, 8923), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['s', '"""%d/%m/%Y"""'], {}), "(s, '%d/%m/%Y')\n", (8908, 8923), False, 'import datetime\n'), ((3703, 3719), 'pathlib.Path', 'Path', (['input_file'], {}), '(input_file)\n', (3707, 3719), False, 'from pathlib import Path\n'), ((6022, 6043), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['packet'], {}), '(packet)\n', (6035, 6043), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((6746, 6770), 'reportlab.lib.utils.ImageReader', 'ImageReader', (['corner_path'], {}), '(corner_path)\n', (6757, 6770), False, 'from reportlab.lib.utils import ImageReader\n'), ((7077, 7098), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['packet'], {}), '(packet)\n', (7090, 7098), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((8186, 8212), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8204, 8212), False, 'import datetime\n'), ((8673, 8700), 'reportlab.pdfbase.ttfonts.TTFont', 'TTFont', (['font', "(font + '.ttf')"], {}), "(font, font + '.ttf')\n", (8679, 8700), False, 'from reportlab.pdfbase.ttfonts import TTFError, TTFont\n'), ((6432, 6453), 'pathlib.Path', 'Path', (['"""corner_ul.png"""'], {}), "('corner_ul.png')\n", (6436, 6453), False, 'from pathlib import Path\n'), ((6495, 6516), 'pathlib.Path', 'Path', 
(['"""corner_ur.png"""'], {}), "('corner_ur.png')\n", (6499, 6516), False, 'from pathlib import Path\n'), ((6559, 6580), 'pathlib.Path', 'Path', (['"""corner_bl.png"""'], {}), "('corner_bl.png')\n", (6563, 6580), False, 'from pathlib import Path\n'), ((6621, 6642), 'pathlib.Path', 'Path', (['"""corner_br.png"""'], {}), "('corner_br.png')\n", (6625, 6642), False, 'from pathlib import Path\n'), ((7547, 7565), 'pathlib.Path', 'Path', (['pdf_template'], {}), '(pdf_template)\n', (7551, 7565), False, 'from pathlib import Path\n'), ((8794, 8820), 'reportlab.pdfbase.ttfonts.TTFont', 'TTFont', (['font', 'DEFAULT_FONT'], {}), '(font, DEFAULT_FONT)\n', (8800, 8820), False, 'from reportlab.pdfbase.ttfonts import TTFError, TTFont\n'), ((583, 597), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (587, 597), False, 'from pathlib import Path\n')] |
# Copyright (c) 2017-2018 {Flair Inc.} <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from taf.foundation.utils import ConnectionCache
class AUT(object):
cache = None
current = None
def __init__(
self,
name=None,
identifier=None,
**kwargs
):
if not AUT.cache:
AUT.cache = ConnectionCache(identifier)
self.id = self.cache.register(
self._create_instance(name, **kwargs),
identifier
)
AUT.current = self
@staticmethod
def launch(app_location, **kwargs):
raise NotImplementedError(
'Launch application'
)
def activate(self):
if self.id != self.cache.current_key:
self.cache.current_key = self.id
AUT.current = self
def take_screenshot(self):
self.activate()
return self.get_screenshot_data()
def close(self):
self.cache.close(self.id)
if not self.cache.current:
AUT.cache = None
AUT.current = None
def get_screenshot_data(self):
raise NotImplementedError(
'Get screenshot data from AUT'
)
def _create_instance(self, name, **kwargs):
raise NotImplementedError(
'Create instance of AUT'
)
| [
"taf.foundation.utils.ConnectionCache"
] | [((862, 889), 'taf.foundation.utils.ConnectionCache', 'ConnectionCache', (['identifier'], {}), '(identifier)\n', (877, 889), False, 'from taf.foundation.utils import ConnectionCache\n')] |
"""
Contexts are the "values" that Python would return. However Contexts are at the
same time also the "contexts" that a user is currently sitting in.
A ContextSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from functools import reduce
from operator import add
from parso.python.tree import ExprStmt, SyncCompFor
from jedi import debug
from jedi._compatibility import zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring
from jedi.common import BaseContextSet, BaseContext
from jedi.evaluate.helpers import SimpleGetItemNotFound
from jedi.evaluate.utils import safe_property
from jedi.evaluate.cache import evaluator_as_method_param_cache
from jedi.cache import memoize_method
_sentinel = object()
class HelperContextMixin(object):
def get_root_context(self):
context = self
while True:
if context.parent_context is None:
return context
context = context.parent_context
@classmethod
@evaluator_as_method_param_cache()
def create_cached(cls, *args, **kwargs):
return cls(*args, **kwargs)
def execute(self, arguments):
return self.evaluator.execute(self, arguments=arguments)
def execute_evaluated(self, *value_list):
from jedi.evaluate.arguments import ValuesArguments
arguments = ValuesArguments([ContextSet([value]) for value in value_list])
return self.evaluator.execute(self, arguments)
def execute_annotation(self):
return self.execute_evaluated()
def gather_annotation_classes(self):
return ContextSet([self])
def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in self.iterate(contextualized_node, is_async)
)
def py__getattribute__(self, name_or_str, name_context=None, position=None,
search_global=False, is_goto=False,
analysis_errors=True):
"""
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
from jedi.evaluate import finder
f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
position, analysis_errors=analysis_errors)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
def py__await__(self):
await_context_set = self.py__getattribute__(u"__await__")
if not await_context_set:
debug.warning('Tried to run __await__ on context %s', self)
return await_context_set.execute_evaluated()
def eval_node(self, node):
return self.evaluator.eval_element(self, node)
def create_context(self, node, node_is_context=False, node_is_object=False):
return self.evaluator.create_context(self, node, node_is_context, node_is_object)
def iterate(self, contextualized_node=None, is_async=False):
debug.dbg('iterate %s', self)
if is_async:
from jedi.evaluate.lazy_context import LazyKnownContexts
# TODO if no __aiter__ contexts are there, error should be:
# TypeError: 'async for' requires an object with __aiter__ method, got int
return iter([
LazyKnownContexts(
self.py__getattribute__('__aiter__').execute_evaluated()
.py__getattribute__('__anext__').execute_evaluated()
.py__getattribute__('__await__').execute_evaluated()
.py__stop_iteration_returns()
) # noqa
])
return self.py__iter__(contextualized_node)
def is_sub_class_of(self, class_context):
for cls in self.py__mro__():
if cls.is_same_class(class_context):
return True
return False
def is_same_class(self, class2):
# Class matching should prefer comparisons that are not this function.
if type(class2).is_same_class != HelperContextMixin.is_same_class:
return class2.is_same_class(self)
return self == class2
class Context(HelperContextMixin, BaseContext):
"""
Should be defined, otherwise the API returns empty types.
"""
predefined_names = {}
"""
To be defined by subclasses.
"""
tree_node = None
@property
def api_type(self):
# By default just lower name of the class. Can and should be
# overwritten.
return self.__class__.__name__.lower()
def py__getitem__(self, index_context_set, contextualized_node):
from jedi.evaluate import analysis
# TODO this context is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
return NO_CONTEXTS
def py__iter__(self, contextualized_node=None):
if contextualized_node is not None:
from jedi.evaluate import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
def get_signatures(self):
return []
def is_class(self):
return False
def is_instance(self):
return False
def is_function(self):
return False
def is_module(self):
return False
def is_namespace(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
def py__bool__(self):
"""
Since Wrapper is a super class for classes, functions and modules,
the return value will always be true.
"""
return True
def py__doc__(self):
try:
self.tree_node.get_doc_node
except AttributeError:
return ''
else:
return clean_scope_docstring(self.tree_node)
return None
def get_safe_value(self, default=_sentinel):
if default is _sentinel:
raise ValueError("There exists no safe value for context %s" % self)
return default
def py__call__(self, arguments):
debug.warning("no execution possible %s", self)
return NO_CONTEXTS
def py__stop_iteration_returns(self):
debug.warning("Not possible to return the stop iterations of %s", self)
return NO_CONTEXTS
def get_qualified_names(self):
# Returns Optional[Tuple[str, ...]]
return None
def is_stub(self):
# The root context knows if it's a stub or not.
return self.parent_context.is_stub()
def iterate_contexts(contexts, contextualized_node=None, is_async=False):
"""
Calls `iterate`, on all contexts but ignores the ordering and just returns
all contexts that the iterate functions yield.
"""
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in contexts.iterate(contextualized_node, is_async=is_async)
)
class _ContextWrapperBase(HelperContextMixin):
predefined_names = {}
@safe_property
def name(self):
from jedi.evaluate.names import ContextName
wrapped_name = self._wrapped_context.name
if wrapped_name.tree_name is not None:
return ContextName(self, wrapped_name.tree_name)
else:
from jedi.evaluate.compiled import CompiledContextName
return CompiledContextName(self, wrapped_name.string_name)
@classmethod
@evaluator_as_method_param_cache()
def create_cached(cls, evaluator, *args, **kwargs):
return cls(*args, **kwargs)
def __getattr__(self, name):
assert name != '_wrapped_context', 'Problem with _get_wrapped_context'
return getattr(self._wrapped_context, name)
class LazyContextWrapper(_ContextWrapperBase):
@safe_property
@memoize_method
def _wrapped_context(self):
with debug.increase_indent_cm('Resolve lazy context wrapper'):
return self._get_wrapped_context()
def __repr__(self):
return '<%s>' % (self.__class__.__name__)
def _get_wrapped_context(self):
raise NotImplementedError
class ContextWrapper(_ContextWrapperBase):
def __init__(self, wrapped_context):
self._wrapped_context = wrapped_context
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_context)
class TreeContext(Context):
def __init__(self, evaluator, parent_context, tree_node):
super(TreeContext, self).__init__(evaluator, parent_context)
self.predefined_names = {}
self.tree_node = tree_node
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class ContextualizedNode(object):
def __init__(self, context, node):
self.context = context
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.eval_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
class ContextualizedName(ContextualizedNode):
# TODO merge with TreeNameDefinition?!
@property
def name(self):
return self.node
def assignment_indexes(self):
"""
Returns an array of tuple(int, node) of the indexes that are used in
tuple assignments.
For example if the name is ``y`` in the following code::
x, (y, z) = 2, ''
would result in ``[(1, xyz_node), (0, yz_node)]``.
When searching for b in the case ``a, *b, c = [...]`` it will return::
[(slice(1, -1), abc_node)]
"""
indexes = []
is_star_expr = False
node = self.node.parent
compare = self.node
while node is not None:
if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
for i, child in enumerate(node.children):
if child == compare:
index = int(i / 2)
if is_star_expr:
from_end = int((len(node.children) - i) / 2)
index = slice(index, -from_end)
indexes.insert(0, (index, node))
break
else:
raise LookupError("Couldn't find the assignment.")
is_star_expr = False
elif node.type == 'star_expr':
is_star_expr = True
elif isinstance(node, (ExprStmt, SyncCompFor)):
break
compare = node
node = node.parent
return indexes
def _getitem(context, index_contexts, contextualized_node):
from jedi.evaluate.context.iterable import Slice
# The actual getitem call.
simple_getitem = getattr(context, 'py__simple_getitem__', None)
result = NO_CONTEXTS
unused_contexts = set()
for index_context in index_contexts:
if simple_getitem is not None:
index = index_context
if isinstance(index_context, Slice):
index = index.obj
try:
method = index.get_safe_value
except AttributeError:
pass
else:
index = method(default=None)
if type(index) in (float, int, str, unicode, slice, bytes):
try:
result |= simple_getitem(index)
continue
except SimpleGetItemNotFound:
pass
unused_contexts.add(index_context)
# The index was somehow not good enough or simply a wrong type.
# Therefore we now iterate through all the contexts and just take
# all results.
if unused_contexts or not index_contexts:
result |= context.py__getitem__(
ContextSet(unused_contexts),
contextualized_node
)
debug.dbg('py__getitem__ result: %s', result)
return result
class ContextSet(BaseContextSet):
def py__class__(self):
return ContextSet(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None, is_async=False):
from jedi.evaluate.lazy_context import get_merged_lazy_context
type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
for lazy_contexts in zip_longest(*type_iters):
yield get_merged_lazy_context(
[l for l in lazy_contexts if l is not None]
)
def execute(self, arguments):
return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set)
def execute_evaluated(self, *args, **kwargs):
return ContextSet.from_sets(c.execute_evaluated(*args, **kwargs) for c in self._set)
def py__getattribute__(self, *args, **kwargs):
if kwargs.get('is_goto'):
return reduce(add, [c.py__getattribute__(*args, **kwargs) for c in self._set], [])
return ContextSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)
def get_item(self, *args, **kwargs):
return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
def try_merge(self, function_name):
context_set = self.__class__([])
for c in self._set:
try:
method = getattr(c, function_name)
except AttributeError:
pass
else:
context_set |= method()
return context_set
def gather_annotation_classes(self):
return ContextSet.from_sets([c.gather_annotation_classes() for c in self._set])
def get_signatures(self):
return [sig for c in self._set for sig in c.get_signatures()]
NO_CONTEXTS = ContextSet([])
def iterator_to_context_set(func):
def wrapper(*args, **kwargs):
return ContextSet(func(*args, **kwargs))
return wrapper
| [
"jedi._compatibility.zip_longest",
"jedi.evaluate.analysis.add",
"jedi.evaluate.names.ContextName",
"jedi.debug.dbg",
"jedi.evaluate.compiled.CompiledContextName",
"jedi.evaluate.cache.evaluator_as_method_param_cache",
"jedi.evaluate.finder.NameFinder",
"jedi.evaluate.lazy_context.get_merged_lazy_context",
"jedi.debug.increase_indent_cm",
"jedi.debug.warning",
"jedi.parser_utils.clean_scope_docstring"
] | [((1093, 1126), 'jedi.evaluate.cache.evaluator_as_method_param_cache', 'evaluator_as_method_param_cache', ([], {}), '()\n', (1124, 1126), False, 'from jedi.evaluate.cache import evaluator_as_method_param_cache\n'), ((8011, 8044), 'jedi.evaluate.cache.evaluator_as_method_param_cache', 'evaluator_as_method_param_cache', ([], {}), '()\n', (8042, 8044), False, 'from jedi.evaluate.cache import evaluator_as_method_param_cache\n'), ((12510, 12555), 'jedi.debug.dbg', 'debug.dbg', (['"""py__getitem__ result: %s"""', 'result'], {}), "('py__getitem__ result: %s', result)\n", (12519, 12555), False, 'from jedi import debug\n'), ((2358, 2471), 'jedi.evaluate.finder.NameFinder', 'finder.NameFinder', (['self.evaluator', 'self', 'name_context', 'name_or_str', 'position'], {'analysis_errors': 'analysis_errors'}), '(self.evaluator, self, name_context, name_or_str, position,\n analysis_errors=analysis_errors)\n', (2375, 2471), False, 'from jedi.evaluate import finder\n'), ((3260, 3289), 'jedi.debug.dbg', 'debug.dbg', (['"""iterate %s"""', 'self'], {}), "('iterate %s', self)\n", (3269, 3289), False, 'from jedi import debug\n'), ((5002, 5172), 'jedi.evaluate.analysis.add', 'analysis.add', (['contextualized_node.context', '"""type-error-not-subscriptable"""', 'contextualized_node.node'], {'message': '("TypeError: \'%s\' object is not subscriptable" % self)'}), '(contextualized_node.context, \'type-error-not-subscriptable\',\n contextualized_node.node, message=\n "TypeError: \'%s\' object is not subscriptable" % self)\n', (5014, 5172), False, 'from jedi.evaluate import analysis\n'), ((6686, 6733), 'jedi.debug.warning', 'debug.warning', (['"""no execution possible %s"""', 'self'], {}), "('no execution possible %s', self)\n", (6699, 6733), False, 'from jedi import debug\n'), ((6812, 6883), 'jedi.debug.warning', 'debug.warning', (['"""Not possible to return the stop iterations of %s"""', 'self'], {}), "('Not possible to return the stop iterations of %s', self)\n", (6825, 6883), False, 'from jedi import debug\n'), ((12957, 12981), 'jedi._compatibility.zip_longest', 'zip_longest', (['*type_iters'], {}), '(*type_iters)\n', (12968, 12981), False, 'from jedi._compatibility import zip_longest, unicode\n'), ((2814, 2873), 'jedi.debug.warning', 'debug.warning', (['"""Tried to run __await__ on context %s"""', 'self'], {}), "('Tried to run __await__ on context %s', self)\n", (2827, 2873), False, 'from jedi import debug\n'), ((5405, 5565), 'jedi.evaluate.analysis.add', 'analysis.add', (['contextualized_node.context', '"""type-error-not-iterable"""', 'contextualized_node.node'], {'message': '("TypeError: \'%s\' object is not iterable" % self)'}), '(contextualized_node.context, \'type-error-not-iterable\',\n contextualized_node.node, message=\n "TypeError: \'%s\' object is not iterable" % self)\n', (5417, 5565), False, 'from jedi.evaluate import analysis\n'), ((6395, 6432), 'jedi.parser_utils.clean_scope_docstring', 'clean_scope_docstring', (['self.tree_node'], {}), '(self.tree_node)\n', (6416, 6432), False, 'from jedi.parser_utils import clean_scope_docstring\n'), ((7794, 7835), 'jedi.evaluate.names.ContextName', 'ContextName', (['self', 'wrapped_name.tree_name'], {}), '(self, wrapped_name.tree_name)\n', (7805, 7835), False, 'from jedi.evaluate.names import ContextName\n'), ((7936, 7987), 'jedi.evaluate.compiled.CompiledContextName', 'CompiledContextName', (['self', 'wrapped_name.string_name'], {}), '(self, wrapped_name.string_name)\n', (7955, 7987), False, 'from jedi.evaluate.compiled import CompiledContextName\n'), 
((8435, 8491), 'jedi.debug.increase_indent_cm', 'debug.increase_indent_cm', (['"""Resolve lazy context wrapper"""'], {}), "('Resolve lazy context wrapper')\n", (8459, 8491), False, 'from jedi import debug\n'), ((13001, 13069), 'jedi.evaluate.lazy_context.get_merged_lazy_context', 'get_merged_lazy_context', (['[l for l in lazy_contexts if l is not None]'], {}), '([l for l in lazy_contexts if l is not None])\n', (13024, 13069), False, 'from jedi.evaluate.lazy_context import get_merged_lazy_context\n')] |
from itertools import groupby
class Solution:
def countAndSay(self, n):
def gen(s):
return "".join(str(len(list(g))) + k for k, g in groupby(s))
s, i = "1", 1
while i < n:
s = gen(s)
i += 1
return s
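# Example trace (added for clarity): countAndSay(4)
#   "1" -> "11" (one 1) -> "21" (two 1s) -> "1211" (one 2, one 1)
# groupby(s) splits the string into runs of equal digits; a run of digit k with
# length n contributes str(n) + k to the next term.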
| [
"itertools.groupby"
] | [((159, 169), 'itertools.groupby', 'groupby', (['s'], {}), '(s)\n', (166, 169), False, 'from itertools import groupby\n')] |
"""Test for .prep.read module
"""
from hidrokit.prep import read
import numpy as np
import pandas as pd
A = pd.DataFrame(
data=[
[1, 3, 4, np.nan, 2, np.nan],
[np.nan, 2, 3, np.nan, 1, 4],
[2, np.nan, 1, 3, 4, np.nan]
],
columns=['A', 'B', 'C', 'D', 'E', 'F']
)
A_date = A.set_index(pd.date_range("20190617", "20190619"))
res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]}
res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [],
'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']}
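# Expected behaviour under test (added annotation): missing_row maps each column
# to the rows where it holds NaN -- positional indices with date_index=False,
# or index dates rendered with date_format ('%m%d') for the date-indexed frame.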
def test_read_number():
test = read.missing_row(A, date_index=False)
assert test.items() == res_A_number.items()
def test_read_date():
test = read.missing_row(A_date, date_format="%m%d")
assert test.items() == res_A_date.items()
| [
"pandas.DataFrame",
"hidrokit.prep.read.missing_row",
"pandas.date_range"
] | [((110, 264), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[[1, 3, 4, np.nan, 2, np.nan], [np.nan, 2, 3, np.nan, 1, 4], [2, np.nan, 1,\n 3, 4, np.nan]]', 'columns': "['A', 'B', 'C', 'D', 'E', 'F']"}), "(data=[[1, 3, 4, np.nan, 2, np.nan], [np.nan, 2, 3, np.nan, 1, \n 4], [2, np.nan, 1, 3, 4, np.nan]], columns=['A', 'B', 'C', 'D', 'E', 'F'])\n", (122, 264), True, 'import pandas as pd\n'), ((322, 359), 'pandas.date_range', 'pd.date_range', (['"""20190617"""', '"""20190619"""'], {}), "('20190617', '20190619')\n", (335, 359), True, 'import pandas as pd\n'), ((601, 638), 'hidrokit.prep.read.missing_row', 'read.missing_row', (['A'], {'date_index': '(False)'}), '(A, date_index=False)\n', (617, 638), False, 'from hidrokit.prep import read\n'), ((722, 766), 'hidrokit.prep.read.missing_row', 'read.missing_row', (['A_date'], {'date_format': '"""%m%d"""'}), "(A_date, date_format='%m%d')\n", (738, 766), False, 'from hidrokit.prep import read\n')] |
from flasgger import swag_from
from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth
from app.controllers.department_controller import DepartmentController
url_prefix = '{}/departments'.format(BaseBlueprint.base_url_prefix)
department_blueprint = Blueprint('department', __name__, url_prefix=url_prefix)
department_controller = DepartmentController(request)
@department_blueprint.route('/', methods=['GET'])
@Auth.has_permission('view_department')
@swag_from('documentation/get_all_departments.yml')
def list_departments():
return department_controller.list_departments()
@department_blueprint.route('/<int:department_id>', methods=['GET'])
@Auth.has_permission('view_department')
@swag_from('documentation/get_single_department.yml')
def get_department(department_id):
return department_controller.get_department(department_id)
@department_blueprint.route('/', methods=['POST'])
@Auth.has_role('admin')
@Security.validator(['name|required:ifExists_Department_name', 'description|required'])
@swag_from('documentation/create_department.yml')
def create_department():
return department_controller.create_department()
@department_blueprint.route('/<int:department_id>', methods=['DELETE'])
@Auth.has_role('admin')
@swag_from('documentation/delete_department.yml')
def delete_department(department_id):
return department_controller.delete_department(department_id)
@department_blueprint.route('/<int:department_id>', methods=['PATCH'])
@Auth.has_role('admin')
@Security.validator(['name|optional', 'description|optional'])
@swag_from('documentation/update_department.yml')
def update_department(department_id):
return department_controller.update_department(department_id)
| [
"flasgger.swag_from",
"app.controllers.department_controller.DepartmentController",
"app.blueprints.base_blueprint.Auth.has_role",
"app.blueprints.base_blueprint.Security.validator",
"app.blueprints.base_blueprint.Blueprint",
"app.blueprints.base_blueprint.Auth.has_permission"
] | [((287, 343), 'app.blueprints.base_blueprint.Blueprint', 'Blueprint', (['"""department"""', '__name__'], {'url_prefix': 'url_prefix'}), "('department', __name__, url_prefix=url_prefix)\n", (296, 343), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((368, 397), 'app.controllers.department_controller.DepartmentController', 'DepartmentController', (['request'], {}), '(request)\n', (388, 397), False, 'from app.controllers.department_controller import DepartmentController\n'), ((451, 489), 'app.blueprints.base_blueprint.Auth.has_permission', 'Auth.has_permission', (['"""view_department"""'], {}), "('view_department')\n", (470, 489), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((491, 541), 'flasgger.swag_from', 'swag_from', (['"""documentation/get_all_departments.yml"""'], {}), "('documentation/get_all_departments.yml')\n", (500, 541), False, 'from flasgger import swag_from\n'), ((686, 724), 'app.blueprints.base_blueprint.Auth.has_permission', 'Auth.has_permission', (['"""view_department"""'], {}), "('view_department')\n", (705, 724), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((726, 778), 'flasgger.swag_from', 'swag_from', (['"""documentation/get_single_department.yml"""'], {}), "('documentation/get_single_department.yml')\n", (735, 778), False, 'from flasgger import swag_from\n'), ((927, 949), 'app.blueprints.base_blueprint.Auth.has_role', 'Auth.has_role', (['"""admin"""'], {}), "('admin')\n", (940, 949), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((951, 1041), 'app.blueprints.base_blueprint.Security.validator', 'Security.validator', (["['name|required:ifExists_Department_name', 'description|required']"], {}), "(['name|required:ifExists_Department_name',\n 'description|required'])\n", (969, 1041), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((1039, 1087), 'flasgger.swag_from', 'swag_from', (['"""documentation/create_department.yml"""'], {}), "('documentation/create_department.yml')\n", (1048, 1087), False, 'from flasgger import swag_from\n'), ((1237, 1259), 'app.blueprints.base_blueprint.Auth.has_role', 'Auth.has_role', (['"""admin"""'], {}), "('admin')\n", (1250, 1259), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((1261, 1309), 'flasgger.swag_from', 'swag_from', (['"""documentation/delete_department.yml"""'], {}), "('documentation/delete_department.yml')\n", (1270, 1309), False, 'from flasgger import swag_from\n'), ((1484, 1506), 'app.blueprints.base_blueprint.Auth.has_role', 'Auth.has_role', (['"""admin"""'], {}), "('admin')\n", (1497, 1506), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((1508, 1569), 'app.blueprints.base_blueprint.Security.validator', 'Security.validator', (["['name|optional', 'description|optional']"], {}), "(['name|optional', 'description|optional'])\n", (1526, 1569), False, 'from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth\n'), ((1571, 1619), 'flasgger.swag_from', 'swag_from', (['"""documentation/update_department.yml"""'], {}), "('documentation/update_department.yml')\n", (1580, 1619), False, 'from flasgger import swag_from\n')] |
import json
import aiohttp
async def request(url, payload=None, params=None, headers=None):
headers = {'content-type': 'application/json', **(headers or {})}
data = payload and json.dumps(payload)
async with aiohttp.ClientSession() as client:
async with client.post(
url, data=data, params=params, headers=headers) as resp:
# TODO: Check response status
json_response = await resp.json()
return json_response
async def get_updates(base_url, timeout, offset):
params = {
'timeout': timeout,
'offset': offset
}
return await request(f'{base_url}/getUpdates', params=params)
async def send_message(base_url, chat_id, text, reply_markup=None):
payload = {
'chat_id': chat_id,
'text': text
}
if reply_markup is not None:
payload['reply_markup'] = reply_markup
return await request(f'{base_url}/sendMessage', payload)
async def answer_callback_query(
base_url, callback_query_id, text, show_alert,
url=None, cache_time=None):
payload = {
'callback_query_id': callback_query_id,
'text': text,
'show_alert': show_alert
}
if url is not None:
payload['url'] = url
if cache_time is not None:
payload['cache_time'] = cache_time
return await request(f'{base_url}/answerCallbackQuery', payload)
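# Hypothetical usage (the base_url format is an assumption, nothing in this
# module defines it):
#
#   import asyncio
#   base_url = 'https://api.telegram.org/bot<token>'
#   asyncio.run(send_message(base_url, chat_id=42, text='hello'))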
| [
"aiohttp.ClientSession",
"json.dumps"
] | [((188, 207), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (198, 207), False, 'import json\n'), ((223, 246), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (244, 246), False, 'import aiohttp\n')] |
"""Unit tests for nautobot_device_onboarding.netdev_keeper module and its classes.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from socket import gaierror
from unittest import mock
from django.test import TestCase
from nautobot.dcim.models import Site, DeviceRole, Platform
from nautobot_device_onboarding.exceptions import OnboardException
from nautobot_device_onboarding.helpers import onboarding_task_fqdn_to_ip
from nautobot_device_onboarding.models import OnboardingTask
class NetdevKeeperTestCase(TestCase):
"""Test the NetdevKeeper Class."""
def setUp(self):
"""Create a superuser and token for API calls."""
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
self.device_role1 = DeviceRole.objects.create(name="Firewall", slug="firewall")
self.platform1 = Platform.objects.create(name="JunOS", slug="junos", napalm_driver="junos")
# self.platform2 = Platform.objects.create(name="Cisco NX-OS", slug="cisco-nx-os")
self.onboarding_task4 = OnboardingTask.objects.create(
ip_address="ntc123.local", site=self.site1, role=self.device_role1, platform=self.platform1
)
self.onboarding_task5 = OnboardingTask.objects.create(
ip_address="bad.local", site=self.site1, role=self.device_role1, platform=self.platform1
)
self.onboarding_task7 = OnboardingTask.objects.create(
ip_address="192.0.2.1/32", site=self.site1, role=self.device_role1, platform=self.platform1
)
@mock.patch("nautobot_device_onboarding.helpers.socket.gethostbyname")
def test_check_ip(self, mock_get_hostbyname):
"""Check DNS to IP address."""
# Look up response value
mock_get_hostbyname.return_value = "192.0.2.1"
# FQDN -> IP
onboarding_task_fqdn_to_ip(ot=self.onboarding_task4)
# Run the check to change the IP address
self.assertEqual(self.onboarding_task4.ip_address, "192.0.2.1")
@mock.patch("nautobot_device_onboarding.helpers.socket.gethostbyname")
def test_failed_check_ip(self, mock_get_hostbyname):
"""Check DNS to IP address failing."""
# Look up a failed response
mock_get_hostbyname.side_effect = gaierror(8)
# Check for bad.local raising an exception
with self.assertRaises(OnboardException) as exc_info:
onboarding_task_fqdn_to_ip(ot=self.onboarding_task5)
        self.assertEqual(exc_info.exception.message, "ERROR failed to complete DNS lookup: bad.local")
        self.assertEqual(exc_info.exception.reason, "fail-dns")
# Check for exception with prefix address entered
with self.assertRaises(OnboardException) as exc_info:
onboarding_task_fqdn_to_ip(ot=self.onboarding_task7)
        self.assertEqual(exc_info.exception.reason, "fail-prefix")
        self.assertEqual(exc_info.exception.message, "ERROR appears a prefix was entered: 192.0.2.1/32")
| [
"nautobot.dcim.models.Site.objects.create",
"nautobot.dcim.models.Platform.objects.create",
"nautobot_device_onboarding.models.OnboardingTask.objects.create",
"nautobot_device_onboarding.helpers.onboarding_task_fqdn_to_ip",
"unittest.mock.patch",
"nautobot.dcim.models.DeviceRole.objects.create",
"socket.gaierror"
] | [((2037, 2106), 'unittest.mock.patch', 'mock.patch', (['"""nautobot_device_onboarding.helpers.socket.gethostbyname"""'], {}), "('nautobot_device_onboarding.helpers.socket.gethostbyname')\n", (2047, 2106), False, 'from unittest import mock\n'), ((2495, 2564), 'unittest.mock.patch', 'mock.patch', (['"""nautobot_device_onboarding.helpers.socket.gethostbyname"""'], {}), "('nautobot_device_onboarding.helpers.socket.gethostbyname')\n", (2505, 2564), False, 'from unittest import mock\n'), ((1170, 1219), 'nautobot.dcim.models.Site.objects.create', 'Site.objects.create', ([], {'name': '"""USWEST"""', 'slug': '"""uswest"""'}), "(name='USWEST', slug='uswest')\n", (1189, 1219), False, 'from nautobot.dcim.models import Site, DeviceRole, Platform\n'), ((1248, 1307), 'nautobot.dcim.models.DeviceRole.objects.create', 'DeviceRole.objects.create', ([], {'name': '"""Firewall"""', 'slug': '"""firewall"""'}), "(name='Firewall', slug='firewall')\n", (1273, 1307), False, 'from nautobot.dcim.models import Site, DeviceRole, Platform\n'), ((1334, 1408), 'nautobot.dcim.models.Platform.objects.create', 'Platform.objects.create', ([], {'name': '"""JunOS"""', 'slug': '"""junos"""', 'napalm_driver': '"""junos"""'}), "(name='JunOS', slug='junos', napalm_driver='junos')\n", (1357, 1408), False, 'from nautobot.dcim.models import Site, DeviceRole, Platform\n'), ((1533, 1659), 'nautobot_device_onboarding.models.OnboardingTask.objects.create', 'OnboardingTask.objects.create', ([], {'ip_address': '"""ntc123.local"""', 'site': 'self.site1', 'role': 'self.device_role1', 'platform': 'self.platform1'}), "(ip_address='ntc123.local', site=self.site1,\n role=self.device_role1, platform=self.platform1)\n", (1562, 1659), False, 'from nautobot_device_onboarding.models import OnboardingTask\n'), ((1711, 1835), 'nautobot_device_onboarding.models.OnboardingTask.objects.create', 'OnboardingTask.objects.create', ([], {'ip_address': '"""bad.local"""', 'site': 'self.site1', 'role': 'self.device_role1', 'platform': 'self.platform1'}), "(ip_address='bad.local', site=self.site1, role\n =self.device_role1, platform=self.platform1)\n", (1740, 1835), False, 'from nautobot_device_onboarding.models import OnboardingTask\n'), ((1886, 2012), 'nautobot_device_onboarding.models.OnboardingTask.objects.create', 'OnboardingTask.objects.create', ([], {'ip_address': '"""192.0.2.1/32"""', 'site': 'self.site1', 'role': 'self.device_role1', 'platform': 'self.platform1'}), "(ip_address='192.0.2.1/32', site=self.site1,\n role=self.device_role1, platform=self.platform1)\n", (1915, 2012), False, 'from nautobot_device_onboarding.models import OnboardingTask\n'), ((2314, 2366), 'nautobot_device_onboarding.helpers.onboarding_task_fqdn_to_ip', 'onboarding_task_fqdn_to_ip', ([], {'ot': 'self.onboarding_task4'}), '(ot=self.onboarding_task4)\n', (2340, 2366), False, 'from nautobot_device_onboarding.helpers import onboarding_task_fqdn_to_ip\n'), ((2747, 2758), 'socket.gaierror', 'gaierror', (['(8)'], {}), '(8)\n', (2755, 2758), False, 'from socket import gaierror\n'), ((2885, 2937), 'nautobot_device_onboarding.helpers.onboarding_task_fqdn_to_ip', 'onboarding_task_fqdn_to_ip', ([], {'ot': 'self.onboarding_task5'}), '(ot=self.onboarding_task5)\n', (2911, 2937), False, 'from nautobot_device_onboarding.helpers import onboarding_task_fqdn_to_ip\n'), ((3246, 3298), 'nautobot_device_onboarding.helpers.onboarding_task_fqdn_to_ip', 'onboarding_task_fqdn_to_ip', ([], {'ot': 'self.onboarding_task7'}), '(ot=self.onboarding_task7)\n', (3272, 3298), False, 'from 
nautobot_device_onboarding.helpers import onboarding_task_fqdn_to_ip\n')] |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Test for the piezo tensor class
"""
__author__ = "<NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "4/1/16"
import os
import unittest
import numpy as np
from pymatgen.analysis.piezo import PiezoTensor
from pymatgen.util.testing import PymatgenTest
class PiezoTest(PymatgenTest):
def setUp(self):
self.piezo_struc = self.get_structure("BaNiO3")
self.voigt_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.vasp_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839],
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.full_tensor_array = [
[[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
[[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
]
def test_new(self):
pt = PiezoTensor(self.full_tensor_array)
self.assertArrayAlmostEqual(pt, self.full_tensor_array)
bad_dim_array = np.zeros((3, 3))
self.assertRaises(ValueError, PiezoTensor, bad_dim_array)
def test_from_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_voigt(self.voigt_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
def test_from_vasp_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_vasp_voigt(self.vasp_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
if __name__ == "__main__":
unittest.main()
| [
"pymatgen.analysis.piezo.PiezoTensor.from_vasp_voigt",
"pymatgen.analysis.piezo.PiezoTensor",
"numpy.array",
"numpy.zeros",
"pymatgen.analysis.piezo.PiezoTensor.from_voigt",
"unittest.main"
] | [((2195, 2210), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2208, 2210), False, 'import unittest\n'), ((554, 684), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0], [0.0, 0.0, 0.0, 0.03839, 0.0, 0.0], [\n 6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0], [0.0, 0.0, 0.0, 0.03839, 0.0,\n 0.0], [6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]])\n', (562, 684), True, 'import numpy as np\n'), ((794, 929), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839], [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0, 0.0\n ], [6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839], [0.0, 0.0, 0.0, 0.0, 0.03839,\n 0.0, 0.0], [6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]])\n', (802, 929), True, 'import numpy as np\n'), ((1318, 1353), 'pymatgen.analysis.piezo.PiezoTensor', 'PiezoTensor', (['self.full_tensor_array'], {}), '(self.full_tensor_array)\n', (1329, 1353), False, 'from pymatgen.analysis.piezo import PiezoTensor\n'), ((1442, 1458), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1450, 1458), True, 'import numpy as np\n'), ((1577, 1593), 'numpy.zeros', 'np.zeros', (['(3, 7)'], {}), '((3, 7))\n', (1585, 1593), True, 'import numpy as np\n'), ((1607, 1648), 'pymatgen.analysis.piezo.PiezoTensor.from_voigt', 'PiezoTensor.from_voigt', (['self.voigt_matrix'], {}), '(self.voigt_matrix)\n', (1629, 1648), False, 'from pymatgen.analysis.piezo import PiezoTensor\n'), ((1896, 1912), 'numpy.zeros', 'np.zeros', (['(3, 7)'], {}), '((3, 7))\n', (1904, 1912), True, 'import numpy as np\n'), ((1926, 1971), 'pymatgen.analysis.piezo.PiezoTensor.from_vasp_voigt', 'PiezoTensor.from_vasp_voigt', (['self.vasp_matrix'], {}), '(self.vasp_matrix)\n', (1953, 1971), False, 'from pymatgen.analysis.piezo import PiezoTensor\n')] |
from direct.showbase import DirectObject
from otp.otpbase import OTPGlobals
import sys
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from otp.otpbase import OTPLocalizer
class ChatInputNormal(DirectObject.DirectObject):
ExecNamespace = None
def __init__(self, chatMgr):
self.chatMgr = chatMgr
self.normalPos = Vec3(-1.083, 0, 0.804)
self.whisperPos = Vec3(0.0, 0, 0.71)
self.whisperAvatarName = None
self.whisperAvatarId = None
self.toPlayer = 0
wantHistory = 0
if __dev__:
wantHistory = 1
self.wantHistory = base.config.GetBool('want-chat-history', wantHistory)
self.history = ['']
self.historySize = base.config.GetInt('chat-history-size', 10)
self.historyIndex = 0
return
def typeCallback(self, extraArgs):
messenger.send('enterNormalChat')
def delete(self):
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
self.chatFrame.destroy()
del self.chatFrame
del self.chatButton
del self.cancelButton
del self.chatEntry
del self.whisperLabel
del self.chatMgr
def activateByData(self, whisperAvatarId = None, toPlayer = 0):
self.toPlayer = toPlayer
self.whisperAvatarId = whisperAvatarId
self.whisperAvatarName = base.talkAssistant.findName(self.whisperAvatarId, self.toPlayer)
if self.whisperAvatarId:
self.chatFrame.setPos(self.whisperPos)
self.whisperLabel['text'] = OTPLocalizer.ChatInputWhisperLabel % self.whisperAvatarName
self.whisperLabel.show()
else:
self.chatFrame.setPos(self.normalPos)
self.whisperLabel.hide()
self.chatEntry['focus'] = 1
self.chatFrame.show()
if self.wantHistory:
self.accept('arrow_up-up', self.getPrevHistory)
self.accept('arrow_down-up', self.getNextHistory)
def deactivate(self):
self.chatEntry.set('')
self.chatEntry['focus'] = 0
self.chatFrame.hide()
self.whisperLabel.hide()
base.win.closeIme()
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
def checkForOverRide(self):
return False
def sendChat(self, text):
if self.checkForOverRide():
self.chatEntry.enterText('')
return
self.deactivate()
self.chatMgr.fsm.request('mainMenu')
if text:
if self.toPlayer:
if self.whisperAvatarId:
self.whisperAvatarName = None
self.whisperAvatarId = None
self.toPlayer = 0
elif self.whisperAvatarId:
self.chatMgr.sendWhisperString(text, self.whisperAvatarId)
self.whisperAvatarName = None
self.whisperAvatarId = None
else:
if self.chatMgr.execChat:
if text[0] == '>':
text = self.__execMessage(text[1:])
base.localAvatar.setChatAbsolute(text, CFSpeech | CFTimeout)
return
base.talkAssistant.sendOpenTalk(text)
if self.wantHistory:
self.addToHistory(text)
return
def chatOverflow(self, overflowText):
self.sendChat(self.chatEntry.get())
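    # Developer/exec chat support: when chatMgr.execChat is enabled, messages starting
    # with '>' are routed here from sendChat above, evaluated as Python, and the result
    # is spoken back via setChatAbsolute.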
def __execMessage(self, message):
if not ChatInputNormal.ExecNamespace:
ChatInputNormal.ExecNamespace = {}
exec('from pandac.PandaModules import *', globals(), self.ExecNamespace)
self.importExecNamespace()
try:
if not isClient():
print('EXECWARNING ChatInputNormal eval: %s' % message)
printStack()
return str(eval(message, globals(), ChatInputNormal.ExecNamespace))
except SyntaxError:
try:
if not isClient():
print('EXECWARNING ChatInputNormal exec: %s' % message)
printStack()
exec(message, globals(), ChatInputNormal.ExecNamespace)
return 'ok'
except:
exception = sys.exc_info()[0]
extraInfo = sys.exc_info()[1]
if extraInfo:
return str(extraInfo)
else:
return str(exception)
except:
exception = sys.exc_info()[0]
extraInfo = sys.exc_info()[1]
if extraInfo:
return str(extraInfo)
else:
return str(exception)
def cancelButtonPressed(self):
self.chatEntry.set('')
self.chatMgr.fsm.request('mainMenu')
def chatButtonPressed(self):
self.sendChat(self.chatEntry.get())
def importExecNamespace(self):
pass
def addToHistory(self, text):
self.history = [text] + self.history[:self.historySize - 1]
self.historyIndex = 0
def getPrevHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex += 1
self.historyIndex %= len(self.history)
def getNextHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex -= 1
self.historyIndex %= len(self.history)
def setPos(self, posX, posY = None, posZ = None):
if posX and posY and posZ:
self.chatFrame.setPos(posX, posY, posZ)
else:
self.chatFrame.setPos(posX)
| [
"sys.exc_info"
] | [((4495, 4509), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4507, 4509), False, 'import sys\n'), ((4537, 4551), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4549, 4551), False, 'import sys\n'), ((4254, 4268), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4266, 4268), False, 'import sys\n'), ((4300, 4314), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4312, 4314), False, 'import sys\n')] |
import argparse
import json
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
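# f1 is passed to Keras via model.compile(..., metrics=[f1]) further below, so training and
# evaluation report a batch-wise F1 score; K.epsilon() keeps the divisions finite when a
# batch contains no positive samples.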
def get_embeddings(sentences_list,layer_json):
'''
    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :return: dictionary mapping each sentence of the sentences_list to its embedding
'''
    sentences = dict()  # maps the index of each line in sentences_list.txt to the sentence itself
    embeddings = dict()  # maps the index of each sentence to its embedding
    sentence_emb = dict()  # key: sentence, value: its embedding
with open(sentences_list,'r') as file:
for index,line in enumerate(file):
sentences[index] = line.strip()
with open(layer_json, 'r',encoding='utf-8') as f:
for line in f:
embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features'])
for key,value in sentences.items():
sentence_emb[value] = embeddings[key]
return sentence_emb
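# Each line of layer_json is expected to be a JSON object of the form
# {"linex_index": <index of the sentence in sentences_list>, "features": [...]},
# i.e. a per-sentence embedding dump (768-dimensional here, matching the np.zeros(768)
# fallbacks below), presumably produced by an upstream BERT feature-extraction step.
# Hypothetical usage sketch:
#   emb = get_embeddings("sentences_list.txt", "output_layer_-1.json")
#   vector = emb["Some sentence that appears in sentences_list.txt"]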
def train_classifier(sentences_list,layer_json,dataset_csv,filename):
'''
    :param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
    :param filename: the path of the pickle file in which the model will be stored
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length]) # np.append(features,length,axis=1)
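    # Each row now holds the four 768-dim embeddings (sentence, previous, next, section)
    # plus the scalar sentence length, i.e. 4 * 768 + 1 = 3073 features per sentence.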
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)
log.fit(X_train, y_train)
#save the model
_ = joblib.dump(log, filename, compress=9)
predictions = log.predict(X_val)
print("###########################################")
print("Results using embeddings from the",layer_json,"file")
print(classification_report(y_val, predictions))
print("F1 score using Logistic Regression:",f1_score(y_val, predictions))
print("###########################################")
#train a DNN
f1_results = list()
for i in range(3):
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
# compile network
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
# fit network
model.fit(X_train, y_train, epochs=100, batch_size=64)
loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
print('\nTest F1: %f' % (f_1 * 100))
f1_results.append(f_1)
model = None
print("###########################################")
print("Results using embeddings from the", layer_json, "file")
# evaluate
print(np.mean(f1_results))
print("###########################################")
def parameter_tuning_LR(sentences_list,layer_json,dataset_csv):
'''
    :param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
C = [0.1,1,2,5,10]
solver = ['newton-cg','saga','sag']
best_params = dict()
best_score = 0.0
for c in C:
for s in solver:
start = time.time()
log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c)
log.fit(X_train, y_train)
predictions = log.predict(X_val)
print("###########################################")
print("LR with C =",c,'and solver = ',s)
print("Results using embeddings from the", layer_json, "file")
print(classification_report(y_val, predictions))
f1 = f1_score(y_val, predictions)
if f1 > best_score:
best_score = f1
best_params['c'] = c
best_params['solver'] = s
print("F1 score using Logistic Regression:",f1)
print("###########################################")
end = time.time()
running_time = end - start
print("Running time:"+str(running_time))
def visualize_DNN(file_to_save):
'''
    Save the DNN architecture to a png file. Better to use the Visulize_DNN.ipynd notebook instead.
    :param file_to_save: the png file to which the architecture of the DNN will be saved.
:return: None
'''
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
plot_model(model, to_file=file_to_save, show_shapes=True)
def save_model(sentences_list,layer_json,dataset_csv,pkl):
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list, layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
log = LogisticRegression(random_state=0, solver='saga', max_iter=1000, C=1)
log.fit(features, label)
_ = joblib.dump(log, pkl, compress=9)
if __name__ == '__main__':
#save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','summarizer1.pkl')
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--sentences", required=True, help="sentences list")
ap.add_argument("-o", "--output", required=True, help="output")
ap.add_argument("-ts", "--train set", required=True, help="path to train set")
ap.add_argument("-sp", "--summarizer path", required=True, help="path to save summarizer")
args = vars(ap.parse_args())
layer = train_classifier(args['sentences'], args['output'], args['train set'],args['summarizer path'])
#layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl')
#layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl')
#layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl')
#layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl')
#tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv')
#layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
| [
"pandas.read_csv",
"sklearn.metrics.classification_report",
"keras.utils.vis_utils.plot_model",
"numpy.column_stack",
"keras.layers.Dense",
"numpy.mean",
"argparse.ArgumentParser",
"keras.backend.clip",
"numpy.asarray",
"numpy.concatenate",
"keras.backend.epsilon",
"json.loads",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"sklearn.externals.joblib.dump",
"time.time",
"keras.layers.Dropout",
"sklearn.metrics.f1_score",
"sklearn.linear_model.LogisticRegression",
"numpy.zeros"
] | [((2973, 2997), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (2984, 2997), True, 'import pandas as pd\n'), ((4112, 4136), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (4122, 4136), True, 'import numpy as np\n'), ((4182, 4203), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (4192, 4203), True, 'import numpy as np\n'), ((4249, 4273), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (4259, 4273), True, 'import numpy as np\n'), ((4322, 4346), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (4332, 4346), True, 'import numpy as np\n'), ((4390, 4408), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (4400, 4408), True, 'import numpy as np\n'), ((4445, 4462), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (4455, 4462), True, 'import numpy as np\n'), ((4496, 4571), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (4510, 4571), True, 'import numpy as np\n'), ((4586, 4621), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (4601, 4621), True, 'import numpy as np\n'), ((4723, 4789), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'label'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(features, label, test_size=0.33, random_state=42)\n', (4739, 4789), False, 'from sklearn.model_selection import train_test_split\n'), ((4801, 4877), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""newton-cg"""', 'max_iter': '(1000)', 'C': '(0.1)'}), "(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)\n", (4819, 4877), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4937, 4975), 'sklearn.externals.joblib.dump', 'joblib.dump', (['log', 'filename'], {'compress': '(9)'}), '(log, filename, compress=9)\n', (4948, 4975), False, 'from sklearn.externals import joblib\n'), ((6715, 6739), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (6726, 6739), True, 'import pandas as pd\n'), ((7854, 7878), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (7864, 7878), True, 'import numpy as np\n'), ((7924, 7945), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (7934, 7945), True, 'import numpy as np\n'), ((7991, 8015), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (8001, 8015), True, 'import numpy as np\n'), ((8064, 8088), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (8074, 8088), True, 'import numpy as np\n'), ((8132, 8150), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (8142, 8150), True, 'import numpy as np\n'), ((8187, 8204), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (8197, 8204), True, 'import numpy as np\n'), ((8238, 8313), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (8252, 8313), True, 'import numpy as np\n'), ((8328, 8363), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (8343, 8363), True, 'import numpy as np\n'), ((8428, 8494), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'label'], 
{'test_size': '(0.33)', 'random_state': '(42)'}), '(features, label, test_size=0.33, random_state=42)\n', (8444, 8494), False, 'from sklearn.model_selection import train_test_split\n'), ((9783, 9795), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9793, 9795), False, 'from keras.models import Sequential\n'), ((10175, 10232), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': 'file_to_save', 'show_shapes': '(True)'}), '(model, to_file=file_to_save, show_shapes=True)\n', (10185, 10232), False, 'from keras.utils.vis_utils import plot_model\n'), ((10309, 10333), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (10320, 10333), True, 'import pandas as pd\n'), ((11448, 11472), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (11458, 11472), True, 'import numpy as np\n'), ((11518, 11539), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (11528, 11539), True, 'import numpy as np\n'), ((11585, 11609), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (11595, 11609), True, 'import numpy as np\n'), ((11658, 11682), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (11668, 11682), True, 'import numpy as np\n'), ((11726, 11744), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (11736, 11744), True, 'import numpy as np\n'), ((11781, 11798), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (11791, 11798), True, 'import numpy as np\n'), ((11832, 11907), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (11846, 11907), True, 'import numpy as np\n'), ((11923, 11958), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (11938, 11958), True, 'import numpy as np\n'), ((11996, 12065), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""saga"""', 'max_iter': '(1000)', 'C': '(1)'}), "(random_state=0, solver='saga', max_iter=1000, C=1)\n", (12014, 12065), False, 'from sklearn.linear_model import LogisticRegression\n'), ((12104, 12137), 'sklearn.externals.joblib.dump', 'joblib.dump', (['log', 'pkl'], {'compress': '(9)'}), '(log, pkl, compress=9)\n', (12115, 12137), False, 'from sklearn.externals import joblib\n'), ((12286, 12311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12309, 12311), False, 'import argparse\n'), ((5146, 5187), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (5167, 5187), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((5237, 5265), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (5245, 5265), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((5406, 5418), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5416, 5418), False, 'from keras.models import Sequential\n'), ((6327, 6346), 'numpy.mean', 'np.mean', (['f1_results'], {}), '(f1_results)\n', (6334, 6346), True, 'import numpy as np\n'), ((9810, 9854), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (9815, 9854), False, 'from keras.layers import Dense, Dropout\n'), ((9870, 9915), 'keras.layers.Dense', 'Dense', (['(128)'], 
{'activation': '"""relu"""', 'trainable': '(True)'}), "(128, activation='relu', trainable=True)\n", (9875, 9915), False, 'from keras.layers import Dense, Dropout\n'), ((9931, 9943), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (9938, 9943), False, 'from keras.layers import Dense, Dropout\n'), ((9960, 10004), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (9965, 10004), False, 'from keras.layers import Dense, Dropout\n'), ((10020, 10033), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (10027, 10033), False, 'from keras.layers import Dense, Dropout\n'), ((10049, 10093), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (10054, 10093), False, 'from keras.layers import Dense, Dropout\n'), ((10109, 10122), 'keras.layers.Dropout', 'Dropout', (['(0.35)'], {}), '(0.35)\n', (10116, 10122), False, 'from keras.layers import Dense, Dropout\n'), ((10138, 10168), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (10143, 10168), False, 'from keras.layers import Dense, Dropout\n'), ((5437, 5481), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5442, 5481), False, 'from keras.layers import Dense, Dropout\n'), ((5501, 5546), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(128, activation='relu', trainable=True)\n", (5506, 5546), False, 'from keras.layers import Dense, Dropout\n'), ((5566, 5578), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (5573, 5578), False, 'from keras.layers import Dense, Dropout\n'), ((5599, 5643), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5604, 5643), False, 'from keras.layers import Dense, Dropout\n'), ((5663, 5676), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5670, 5676), False, 'from keras.layers import Dense, Dropout\n'), ((5696, 5740), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5701, 5740), False, 'from keras.layers import Dense, Dropout\n'), ((5760, 5773), 'keras.layers.Dropout', 'Dropout', (['(0.35)'], {}), '(0.35)\n', (5767, 5773), False, 'from keras.layers import Dense, Dropout\n'), ((5793, 5823), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5798, 5823), False, 'from keras.layers import Dense, Dropout\n'), ((8666, 8677), 'time.time', 'time.time', ([], {}), '()\n', (8675, 8677), False, 'import time\n'), ((8696, 8760), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': 's', 'max_iter': '(1000)', 'C': 'c'}), '(random_state=0, solver=s, max_iter=1000, C=c)\n', (8714, 8760), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9116, 9144), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (9124, 9144), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((9431, 9442), 'time.time', 'time.time', ([], {}), '()\n', (9440, 9442), False, 'import time\n'), ((751, 780), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), 
'(y_true * y_pred, 0, 1)\n', (757, 780), True, 'from keras import backend as K\n'), ((826, 846), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (832, 846), True, 'from keras import backend as K\n'), ((905, 916), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (914, 916), True, 'from keras import backend as K\n'), ((1236, 1265), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1242, 1265), True, 'from keras import backend as K\n'), ((1312, 1332), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (1318, 1332), True, 'from keras import backend as K\n'), ((1395, 1406), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1404, 1406), True, 'from keras import backend as K\n'), ((1562, 1573), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1571, 1573), True, 'from keras import backend as K\n'), ((3526, 3539), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3534, 3539), True, 'import numpy as np\n'), ((3727, 3740), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3735, 3740), True, 'import numpy as np\n'), ((3864, 3877), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3872, 3877), True, 'import numpy as np\n'), ((4011, 4024), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (4019, 4024), True, 'import numpy as np\n'), ((7268, 7281), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7276, 7281), True, 'import numpy as np\n'), ((7469, 7482), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7477, 7482), True, 'import numpy as np\n'), ((7606, 7619), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7614, 7619), True, 'import numpy as np\n'), ((7753, 7766), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7761, 7766), True, 'import numpy as np\n'), ((9056, 9097), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (9077, 9097), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((10862, 10875), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (10870, 10875), True, 'import numpy as np\n'), ((11063, 11076), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11071, 11076), True, 'import numpy as np\n'), ((11200, 11213), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11208, 11213), True, 'import numpy as np\n'), ((11347, 11360), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11355, 11360), True, 'import numpy as np\n'), ((2388, 2404), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2398, 2404), False, 'import json\n'), ((2434, 2450), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2444, 2450), False, 'import json\n')] |
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm
__author__ = "<NAME>"
def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
''' Estimate the collision condition. '''
(veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
if -1e-6 < delta_angle_2 < 1e-6:
delta_angle_2 = 1e-6
delta_v1_list = []
delta_v2_list = []
    # Estimate the collision condition (delta-v) according to the principal impact direction.
for veh_striking in veh_striking_list:
if veh_striking[0] == 1:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 2:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V1_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
elif veh_striking[0] == 3:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgs[0] - veh_striking[3])
veh_RDS = np.abs(V2_v * np.cos(delta_angle) - V1_v)
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 4:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgf[0] - veh_striking[3])
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V2_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
# Obtain delta-v based on the plane 2-DOF rigid-body collision model with momentum conservation.
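        # With the moment-arm reduction factors y_i = k_i^2 / (a_i^2 + k_i^2), the shared impulse yields
        #   delta_v1 = (1 + e) * m2 * y1 * y2 * RDS / (m1 * y1 + m2 * y2)
        # and the symmetric expression for delta_v2, which is exactly what the lines below evaluate.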
veh_y1 = veh_k[0] ** 2 / (veh_a1 ** 2 + veh_k[0] ** 2)
veh_y2 = veh_k[1] ** 2 / (veh_a2 ** 2 + veh_k[1] ** 2)
delta_v1 = (1 + veh_e) * veh_m[1] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v2 = (1 + veh_e) * veh_m[0] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v1_list.append(delta_v1)
delta_v2_list.append(delta_v2)
delta_v1_ = max(delta_v1_list)
delta_v2_ = max(delta_v2_list)
index = delta_v1_list.index(max(delta_v1_list))
return delta_v1_, delta_v2_, index | [
"numpy.abs",
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.arctan"
] | [((723, 742), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (729, 742), True, 'import numpy as np\n'), ((1050, 1084), 'numpy.arctan', 'np.arctan', (['(veh_cgf[0] / veh_cgs[0])'], {}), '(veh_cgf[0] / veh_cgs[0])\n', (1059, 1084), True, 'import numpy as np\n'), ((1106, 1142), 'numpy.abs', 'np.abs', (['(veh_cgs[1] - veh_striking[3])'], {}), '(veh_cgs[1] - veh_striking[3])\n', (1112, 1142), True, 'import numpy as np\n'), ((1579, 1613), 'numpy.arctan', 'np.arctan', (['(veh_cgf[0] / veh_cgs[0])'], {}), '(veh_cgf[0] / veh_cgs[0])\n', (1588, 1613), True, 'import numpy as np\n'), ((1635, 1671), 'numpy.abs', 'np.abs', (['(veh_cgf[1] - veh_striking[3])'], {}), '(veh_cgf[1] - veh_striking[3])\n', (1641, 1671), True, 'import numpy as np\n'), ((1235, 1277), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)'], {}), '(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)\n', (1242, 1277), True, 'import numpy as np\n'), ((1280, 1310), 'numpy.cos', 'np.cos', (['(veh_ca + delta_angle_2)'], {}), '(veh_ca + delta_angle_2)\n', (1286, 1310), True, 'import numpy as np\n'), ((1818, 1839), 'numpy.sin', 'np.sin', (['delta_angle_2'], {}), '(delta_angle_2)\n', (1824, 1839), True, 'import numpy as np\n'), ((1931, 1965), 'numpy.arctan', 'np.arctan', (['(veh_cgf[1] / veh_cgs[1])'], {}), '(veh_cgf[1] / veh_cgs[1])\n', (1940, 1965), True, 'import numpy as np\n'), ((1987, 2023), 'numpy.abs', 'np.abs', (['(veh_cgs[0] - veh_striking[3])'], {}), '(veh_cgs[0] - veh_striking[3])\n', (1993, 2023), True, 'import numpy as np\n'), ((1179, 1198), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (1185, 1198), True, 'import numpy as np\n'), ((1700, 1742), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)'], {}), '(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)\n', (1707, 1742), True, 'import numpy as np\n'), ((1745, 1787), 'numpy.cos', 'np.cos', (['(delta_angle_2 - veh_ca + np.pi / 2)'], {}), '(delta_angle_2 - veh_ca + np.pi / 2)\n', (1751, 1787), True, 'import numpy as np\n'), ((2460, 2494), 'numpy.arctan', 'np.arctan', (['(veh_cgf[1] / veh_cgs[1])'], {}), '(veh_cgf[1] / veh_cgs[1])\n', (2469, 2494), True, 'import numpy as np\n'), ((2516, 2552), 'numpy.abs', 'np.abs', (['(veh_cgf[0] - veh_striking[3])'], {}), '(veh_cgf[0] - veh_striking[3])\n', (2522, 2552), True, 'import numpy as np\n'), ((2116, 2158), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)'], {}), '(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)\n', (2123, 2158), True, 'import numpy as np\n'), ((2161, 2191), 'numpy.cos', 'np.cos', (['(veh_ca + delta_angle_2)'], {}), '(veh_ca + delta_angle_2)\n', (2167, 2191), True, 'import numpy as np\n'), ((2699, 2720), 'numpy.sin', 'np.sin', (['delta_angle_2'], {}), '(delta_angle_2)\n', (2705, 2720), True, 'import numpy as np\n'), ((2060, 2079), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (2066, 2079), True, 'import numpy as np\n'), ((2581, 2623), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)'], {}), '(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)\n', (2588, 2623), True, 'import numpy as np\n'), ((2626, 2668), 'numpy.cos', 'np.cos', (['(delta_angle_2 - veh_ca + np.pi / 2)'], {}), '(delta_angle_2 - veh_ca + np.pi / 2)\n', (2632, 2668), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from copy import deepcopy
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.utils.api import MetaApi
from datamanage.pro.utils.time import utc_to_local, str_to_datetime
from datamanage.pro.lifecycle.models_dict import (
DATASET_CREATE_MAPPINGS,
DATASET_CREATE_EVENT_INFO_DICT,
DataTraceShowType,
ComplexSearchBackendType,
DataTraceFinishStatus,
)
def get_dataset_create_info(dataset_id, dataset_type):
"""获取数据足迹中和数据创建相关信息
:param dataset_id: 数据id
:param dataset_type: 数据类型
:return: 数据创建相关信息
:rtype: list
"""
    # 1) Fetch the dataset creation info from dgraph
data_set_create_info_statement = """
{
get_dataset_create_info(func: eq(%s, "%s")){created_by created_at}
}
""" % (
DATASET_CREATE_MAPPINGS[dataset_type]['data_set_pk'],
dataset_id,
)
query_result = MetaApi.complex_search(
{"backend_type": ComplexSearchBackendType.DGRAPH.value, "statement": data_set_create_info_statement}, raw=True
)
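    # The dgraph result is expected under query_result['data']['data']['get_dataset_create_info']
    # as a non-empty list of {'created_by': ..., 'created_at': ...} dicts; anything else is
    # treated as an error below.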
create_info_ret = query_result['data']['data']['get_dataset_create_info']
if not (isinstance(create_info_ret, list) and create_info_ret):
raise dm_pro_errors.GetDataSetCreateInfoError(message_kv={'dataset_id': dataset_id})
    # 2) Build the formatted creation info
create_trace_dict = deepcopy(DATASET_CREATE_EVENT_INFO_DICT)
create_trace_dict.update(
{
"sub_type": dataset_type,
"sub_type_alias": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"description": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"created_at": utc_to_local(create_info_ret[0]['created_at']),
"created_by": create_info_ret[0]['created_by'],
"show_type": DataTraceShowType.DISPLAY.value,
"datetime": str_to_datetime(utc_to_local(create_info_ret[0]['created_at'])),
"status": DataTraceFinishStatus.STATUS,
"status_alias": DataTraceFinishStatus.STATUS_ALIAS,
}
)
return [create_trace_dict]
| [
"datamanage.pro.exceptions.GetDataSetCreateInfoError",
"datamanage.utils.api.MetaApi.complex_search",
"datamanage.pro.utils.time.utc_to_local",
"copy.deepcopy"
] | [((2222, 2361), 'datamanage.utils.api.MetaApi.complex_search', 'MetaApi.complex_search', (["{'backend_type': ComplexSearchBackendType.DGRAPH.value, 'statement':\n data_set_create_info_statement}"], {'raw': '(True)'}), "({'backend_type': ComplexSearchBackendType.DGRAPH.\n value, 'statement': data_set_create_info_statement}, raw=True)\n", (2244, 2361), False, 'from datamanage.utils.api import MetaApi\n'), ((2653, 2693), 'copy.deepcopy', 'deepcopy', (['DATASET_CREATE_EVENT_INFO_DICT'], {}), '(DATASET_CREATE_EVENT_INFO_DICT)\n', (2661, 2693), False, 'from copy import deepcopy\n'), ((2531, 2609), 'datamanage.pro.exceptions.GetDataSetCreateInfoError', 'dm_pro_errors.GetDataSetCreateInfoError', ([], {'message_kv': "{'dataset_id': dataset_id}"}), "(message_kv={'dataset_id': dataset_id})\n", (2570, 2609), True, 'from datamanage.pro import exceptions as dm_pro_errors\n'), ((2983, 3029), 'datamanage.pro.utils.time.utc_to_local', 'utc_to_local', (["create_info_ret[0]['created_at']"], {}), "(create_info_ret[0]['created_at'])\n", (2995, 3029), False, 'from datamanage.pro.utils.time import utc_to_local, str_to_datetime\n'), ((3189, 3235), 'datamanage.pro.utils.time.utc_to_local', 'utc_to_local', (["create_info_ret[0]['created_at']"], {}), "(create_info_ret[0]['created_at'])\n", (3201, 3235), False, 'from datamanage.pro.utils.time import utc_to_local, str_to_datetime\n')] |
#!/usr/bin/env python
# runs after the job (and after the default post-filter)
from galaxy.tools.parameters import DataToolParameter
# Older py compatibility
try:
set()
except:
from sets import Set as set
def validate_input( trans, error_map, param_values, page_param_map ):
dbkeys = set()
data_param_names = set()
data_params = 0
for name, param in page_param_map.items():
if isinstance( param, DataToolParameter ):
# for each dataset parameter
if param_values.get(name, None) is not None:
dbkeys.add( param_values[name].dbkey )
data_params += 1
# check meta data
try:
param = param_values[name]
int( param.metadata.startCol )
int( param.metadata.endCol )
int( param.metadata.chromCol )
if param.metadata.strandCol is not None:
int( param.metadata.strandCol )
except:
error_msg = ("The attributes of this dataset are not properly set. "
"Click the pencil icon in the history item to set the chrom, start, end and strand columns.")
error_map[name] = error_msg
data_param_names.add( name )
if len( dbkeys ) > 1:
for name in data_param_names:
            error_map[name] = "All datasets must belong to the same genomic build; " \
                "this dataset is linked to build '%s'" % param_values[name].dbkey
if data_params != len(data_param_names):
for name in data_param_names:
error_map[name] = "A dataset of the appropriate type is required"
| [
"sets.Set"
] | [((168, 173), 'sets.Set', 'set', ([], {}), '()\n', (171, 173), True, 'from sets import Set as set\n'), ((299, 304), 'sets.Set', 'set', ([], {}), '()\n', (302, 304), True, 'from sets import Set as set\n'), ((328, 333), 'sets.Set', 'set', ([], {}), '()\n', (331, 333), True, 'from sets import Set as set\n')] |
# -*- coding: utf-8 -*-
'''
Salt module to manage unix mounts and the fstab file
'''
from __future__ import absolute_import
# Import python libs
import os
import re
import logging
# Import salt libs
import salt.utils
from salt._compat import string_types
from salt.utils import which as _which
from salt.exceptions import CommandNotFoundError, CommandExecutionError
# Set up logger
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'mount'
def __virtual__():
'''
Only load on POSIX-like systems
'''
# Disable on Windows, a specific file module exists:
if salt.utils.is_windows():
return False
return True
def _list_mounts():
ret = {}
if __grains__['os'] in ['MacOS', 'Darwin']:
mounts = __salt__['cmd.run_stdout']('mount')
else:
mounts = __salt__['cmd.run_stdout']('mount -l')
for line in mounts.split('\n'):
comps = re.sub(r"\s+", " ", line).split()
ret[comps[2]] = comps[0]
return ret
def _active_mountinfo(ret):
_list = _list_mounts()
filename = '/proc/self/mountinfo'
if not os.access(filename, os.R_OK):
msg = 'File not readable {0}'
raise CommandExecutionError(msg.format(filename))
blkid_info = __salt__['disk.blkid']()
with salt.utils.fopen(filename) as ifile:
for line in ifile:
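            # A mountinfo line is assumed to carry no optional shared/master fields, e.g.:
            #   36 35 98:0 /sub /mnt rw,noatime - ext3 /dev/root rw,errors=continue
            # i.e. mount id, parent id, major:minor, root, mount point, mount options,
            # the '-' separator, fstype, source device and super options, which is the
            # layout the index-based parsing below relies on.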
comps = line.split()
device = comps[2].split(':')
device_name = comps[8]
device_uuid = None
if device_name:
device_uuid = blkid_info.get(device_name, {}).get('UUID')
device_uuid = device_uuid and device_uuid.lower()
ret[comps[4]] = {'mountid': comps[0],
'parentid': comps[1],
'major': device[0],
'minor': device[1],
'root': comps[3],
'opts': comps[5].split(','),
'fstype': comps[7],
'device': device_name,
'alt_device': _list.get(comps[4], None),
'superopts': comps[9].split(','),
'device_uuid': device_uuid}
return ret
def _active_mounts(ret):
'''
List active mounts on Linux systems
'''
_list = _list_mounts()
filename = '/proc/self/mounts'
if not os.access(filename, os.R_OK):
msg = 'File not readable {0}'
raise CommandExecutionError(msg.format(filename))
with salt.utils.fopen(filename) as ifile:
for line in ifile:
comps = line.split()
ret[comps[1]] = {'device': comps[0],
'alt_device': _list.get(comps[1], None),
'fstype': comps[2],
'opts': comps[3].split(',')}
return ret
def _active_mounts_freebsd(ret):
'''
List active mounts on FreeBSD systems
'''
for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': comps[3].split(',')}
return ret
def _active_mounts_solaris(ret):
'''
List active mounts on Solaris systems
'''
for line in __salt__['cmd.run_stdout']('mount -v').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
ret[comps[2]] = {'device': comps[0],
'fstype': comps[4],
'opts': comps[5].split('/')}
return ret
def _active_mounts_openbsd(ret):
'''
List active mounts on OpenBSD systems
'''
for line in __salt__['cmd.run_stdout']('mount -v').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
nod = __salt__['cmd.run_stdout']('ls -l {0}'.format(comps[0]))
nod = ' '.join(nod.split()).split(" ")
parens = re.findall(r'\((.*?)\)', line, re.DOTALL)
ret[comps[3]] = {'device': comps[0],
'fstype': comps[5],
'opts': parens[1].split(", "),
'major': str(nod[4].strip(",")),
'minor': str(nod[5]),
'device_uuid': parens[0]}
return ret
def _active_mounts_darwin(ret):
'''
List active mounts on Mac OS systems
'''
for line in __salt__['cmd.run_stdout']('mount').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ")
ret[comps[2]] = {'device': comps[0],
'fstype': parens[0],
'opts': parens[1:]}
return ret
def active(extended=False):
'''
List the active mounts.
CLI Example:
.. code-block:: bash
salt '*' mount.active
'''
ret = {}
if __grains__['os'] == 'FreeBSD':
_active_mounts_freebsd(ret)
elif __grains__['os'] == 'Solaris':
_active_mounts_solaris(ret)
elif __grains__['os'] == 'OpenBSD':
_active_mounts_openbsd(ret)
elif __grains__['os'] in ['MacOS', 'Darwin']:
_active_mounts_darwin(ret)
else:
if extended:
try:
_active_mountinfo(ret)
except CommandExecutionError:
_active_mounts(ret)
else:
_active_mounts(ret)
return ret
def fstab(config='/etc/fstab'):
'''
List the contents of the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.fstab
'''
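    # Each valid fstab line has six whitespace-separated fields, e.g.:
    #   /dev/sdz1  /mnt/foo  ext4  defaults  0  0
    # and is returned keyed by mount point with device/fstype/opts/dump/pass entries.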
ret = {}
if not os.path.isfile(config):
return ret
with salt.utils.fopen(config) as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
continue
if not line.strip():
# Blank line
continue
comps = line.split()
if len(comps) != 6:
# Invalid entry
continue
ret[comps[1]] = {'device': comps[0],
'fstype': comps[2],
'opts': comps[3].split(','),
'dump': comps[4],
'pass': comps[5]}
return ret
def rm_fstab(name, device, config='/etc/fstab'):
'''
Remove the mount point from the fstab
CLI Example:
.. code-block:: bash
        salt '*' mount.rm_fstab /mnt/foo /dev/sdz1
'''
contents = fstab(config)
if name not in contents:
return True
# The entry is present, get rid of it
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 6:
# Invalid entry
lines.append(line)
continue
comps = line.split()
if device:
if comps[1] == name and comps[0] == device:
continue
else:
if comps[1] == name:
continue
lines.append(line)
except (IOError, OSError) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (IOError, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
return True
def set_fstab(
name,
device,
fstype,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
test=False,
**kwargs):
'''
Verify that this mount is represented in the fstab, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
'''
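    # Returns 'present' when an equivalent entry already exists, 'change' when an existing
    # entry is (or, with test=True, would be) rewritten, and 'new' when a line such as
    #   /dev/sdz1    /mnt/foo    ext4    defaults    0 0
    # is appended to the fstab.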
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
lines = []
change = False
present = False
if not os.path.isfile(config):
raise CommandExecutionError('Bad config file "{0}"'.format(config))
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 6:
# Invalid entry
lines.append(line)
continue
if comps[1] == name or comps[0] == device:
# check to see if there are changes
# and fix them if there are any
present = True
if comps[0] != device:
change = True
comps[0] = device
if comps[1] != name:
change = True
comps[1] = name
if comps[2] != fstype:
change = True
comps[2] = fstype
if comps[3] != opts:
change = True
comps[3] = opts
if comps[4] != str(dump):
change = True
comps[4] = str(dump)
if comps[5] != str(pass_num):
change = True
comps[5] = str(pass_num)
if change:
log.debug(
'fstab entry for mount point {0} needs to be '
'updated'.format(name)
)
newline = (
'{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
device, name, fstype, opts, dump, pass_num
)
)
lines.append(newline)
else:
lines.append(line)
except (IOError, OSError) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
if change:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return 'change'
if not change:
if present:
# The right entry is already here
return 'present'
else:
if not salt.utils.test_mode(test=test, **kwargs):
# The entry is new, add it to the end of the fstab
newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(device,
name,
fstype,
opts,
dump,
pass_num)
lines.append(newline)
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
raise CommandExecutionError(
'File not writable {0}'.format(
config
)
)
return 'new'
def rm_automaster(name, device, config='/etc/auto_salt'):
'''
Remove the mount point from the auto_master
CLI Example:
.. code-block:: bash
        salt '*' mount.rm_automaster /mnt/foo /dev/sdz1
'''
contents = automaster(config)
if name not in contents:
return True
# The entry is present, get rid of it
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
lines.append(line)
continue
comps = line.split()
prefix = "/.."
name_chk = comps[0].replace(prefix, "")
device_fmt = comps[2].split(":")
if device:
if name_chk == name and device_fmt[1] == device:
continue
else:
if name_chk == name:
continue
lines.append(line)
except (IOError, OSError) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (IOError, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
# Update automount
__salt__['cmd.run']('automount -cv')
return True
def set_automaster(
name,
device,
fstype,
opts='',
config='/etc/auto_salt',
test=False,
**kwargs):
'''
Verify that this mount is represented in the auto_salt, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_automaster /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
lines = []
change = False
present = False
automaster_file = "/etc/auto_master"
if not os.path.isfile(config):
__salt__['file.touch'](config)
__salt__['file.append'](automaster_file, "/-\t\t\t{0}".format(config))
name = "/..{0}".format(name)
device_fmt = "{0}:{1}".format(fstype, device)
type_opts = "-fstype={0},{1}".format(fstype, opts)
if fstype == 'smbfs':
device_fmt = device_fmt.replace(fstype, "")
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
lines.append(line)
continue
if comps[0] == name or comps[2] == device_fmt:
# check to see if there are changes
# and fix them if there are any
present = True
if comps[0] != name:
change = True
comps[0] = name
if comps[1] != type_opts:
change = True
comps[1] = type_opts
if comps[2] != device_fmt:
change = True
comps[2] = device_fmt
if change:
log.debug(
'auto_master entry for mount point {0} needs to be '
'updated'.format(name)
)
newline = (
'{0}\t{1}\t{2}\n'.format(
name, type_opts, device_fmt)
)
lines.append(newline)
else:
lines.append(line)
except (IOError, OSError) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
if change:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return 'change'
if not change:
if present:
# The right entry is already here
return 'present'
else:
if not salt.utils.test_mode(test=test, **kwargs):
# The entry is new, add it to the end of the fstab
newline = (
'{0}\t{1}\t{2}\n'.format(
name, type_opts, device_fmt)
)
lines.append(newline)
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
raise CommandExecutionError(
'File not writable {0}'.format(
config
)
)
return 'new'
def automaster(config='/etc/auto_salt'):
'''
    List the contents of the auto master
    CLI Example:
    .. code-block:: bash
        salt '*' mount.automaster
'''
ret = {}
if not os.path.isfile(config):
return ret
with salt.utils.fopen(config) as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
continue
if not line.strip():
# Blank line
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
continue
prefix = "/.."
name = comps[0].replace(prefix, "")
device_fmt = comps[2].split(":")
opts = comps[1].split(',')
ret[name] = {'device': device_fmt[1],
'fstype': opts[0],
'opts': opts[1:]}
return ret
def mount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
'''
Mount a device
CLI Example:
.. code-block:: bash
salt '*' mount.mount /mnt/foo /dev/sdz1 True
'''
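    # The CLI example above ends up running roughly `mount -o defaults /dev/sdz1 /mnt/foo`
    # (on MacOS/Darwin a bare 'defaults' option is dropped and no -o flag is passed).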
# Darwin doesn't expect defaults when mounting without other options
if 'defaults' in opts and __grains__['os'] in ['MacOS', 'Darwin']:
opts = None
if isinstance(opts, string_types):
opts = opts.split(',')
if not os.path.exists(name) and mkmnt:
__salt__['file.mkdir'](name=name, user=user)
args = ''
if opts is not None:
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
args += ' -t {0}'.format(fstype)
cmd = 'mount {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd, runas=user)
if out['retcode']:
return out['stderr']
return True
def remount(name, device, mkmnt=False, fstype='', opts='defaults', user=None):
'''
Attempt to remount a device, if the device is not already mounted, mount
is called
CLI Example:
.. code-block:: bash
salt '*' mount.remount /mnt/foo /dev/sdz1 True
'''
force_mount = False
if __grains__['os'] in ['MacOS', 'Darwin']:
if opts == 'defaults':
opts = 'noowners'
if fstype == 'smbfs':
force_mount = True
if isinstance(opts, string_types):
opts = opts.split(',')
mnts = active()
if name in mnts:
# The mount point is mounted, attempt to remount it with the given data
if 'remount' not in opts and __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin']:
opts.append('remount')
if force_mount:
# We need to force the mount but first we should unmount
umount(name, device, user=user)
lopts = ','.join(opts)
args = '-o {0}'.format(lopts)
if fstype:
args += ' -t {0}'.format(fstype)
if __grains__['os'] not in ['OpenBSD', 'MacOS', 'Darwin'] or force_mount:
cmd = 'mount {0} {1} {2} '.format(args, device, name)
else:
cmd = 'mount -u {0} {1} {2} '.format(args, device, name)
out = __salt__['cmd.run_all'](cmd, runas=user)
if out['retcode']:
return out['stderr']
return True
# Mount a filesystem that isn't already
return mount(name, device, mkmnt, fstype, opts, user=user)
def umount(name, device=None, user=None):
'''
Attempt to unmount a device by specifying the directory it is mounted on
CLI Example:
.. code-block:: bash
salt '*' mount.umount /mnt/foo
.. versionadded:: Lithium
salt '*' mount.umount /mnt/foo /dev/xvdc1
'''
mnts = active()
if name not in mnts:
return "{0} does not have anything mounted".format(name)
if not device:
cmd = 'umount {0}'.format(name)
else:
cmd = 'umount {0}'.format(device)
out = __salt__['cmd.run_all'](cmd, runas=user)
if out['retcode']:
return out['stderr']
return True
def is_fuse_exec(cmd):
'''
Returns true if the command passed is a fuse mountable application.
CLI Example:
.. code-block:: bash
salt '*' mount.is_fuse_exec sshfs
'''
cmd_path = _which(cmd)
# No point in running ldd on a command that doesn't exist
if not cmd_path:
return False
elif not _which('ldd'):
raise CommandNotFoundError('ldd')
out = __salt__['cmd.run']('ldd {0}'.format(cmd_path))
return 'libfuse' in out
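# Illustrative sketch (not part of the original module): is_fuse_exec() above
# just scans the 'ldd' output of the command for a libfuse dependency. A
# hypothetical linker line that would make it return True:
_example_ldd_line = 'libfuse.so.2 => /lib/x86_64-linux-gnu/libfuse.so.2'
assert 'libfuse' in _example_ldd_line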
def swaps():
'''
Return a dict containing information on active swap
CLI Example:
.. code-block:: bash
salt '*' mount.swaps
'''
ret = {}
if __grains__['os'] != 'OpenBSD':
with salt.utils.fopen('/proc/swaps') as fp_:
for line in fp_:
if line.startswith('Filename'):
continue
comps = line.split()
ret[comps[0]] = {'type': comps[1],
'size': comps[2],
'used': comps[3],
'priority': comps[4]}
else:
for line in __salt__['cmd.run_stdout']('swapctl -kl').splitlines():
if line.startswith(('Device', 'Total')):
continue
swap_type = "file"
comps = line.split()
if comps[0].startswith('/dev/'):
swap_type = "partition"
ret[comps[0]] = {'type': swap_type,
'size': comps[1],
'used': comps[2],
'priority': comps[5]}
return ret
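# Illustrative sketch (not part of the original module): shape of the mapping
# returned by swaps() above for a hypothetical /proc/swaps entry
# "/dev/sda2 partition 2097148 0 -1".
_example_swaps_ret = {
    '/dev/sda2': {'type': 'partition',
                  'size': '2097148',
                  'used': '0',
                  'priority': '-1'},
}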
def swapon(name, priority=None):
'''
Activate a swap disk
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile
'''
ret = {}
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = False
return ret
cmd = 'swapon {0}'.format(name)
if priority:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd)
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = True
return ret
return ret
def swapoff(name):
'''
Deactivate a named swap mount
CLI Example:
.. code-block:: bash
salt '*' mount.swapoff /root/swapfile
'''
on_ = swaps()
if name in on_:
if __grains__['os'] != 'OpenBSD':
__salt__['cmd.run']('swapoff {0}'.format(name))
else:
__salt__['cmd.run']('swapctl -d {0}'.format(name))
on_ = swaps()
if name in on_:
return False
return True
return None
def is_mounted(name):
'''
.. versionadded:: 2014.7.0
Provide information if the path is mounted
CLI Example:
.. code-block:: bash
salt '*' mount.is_mounted /mnt/share
'''
active_ = active()
if name in active_:
return True
else:
return False
| [
"logging.getLogger",
"os.path.exists",
"os.access",
"os.path.isfile",
"salt.exceptions.CommandNotFoundError",
"salt.utils.which",
"re.sub",
"re.findall"
] | [((391, 418), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (408, 418), False, 'import logging\n'), ((22597, 22608), 'salt.utils.which', '_which', (['cmd'], {}), '(cmd)\n', (22603, 22608), True, 'from salt.utils import which as _which\n'), ((1123, 1151), 'os.access', 'os.access', (['filename', 'os.R_OK'], {}), '(filename, os.R_OK)\n', (1132, 1151), False, 'import os\n'), ((2440, 2468), 'os.access', 'os.access', (['filename', 'os.R_OK'], {}), '(filename, os.R_OK)\n', (2449, 2468), False, 'import os\n'), ((4000, 4042), 're.findall', 're.findall', (['"""\\\\((.*?)\\\\)"""', 'line', 're.DOTALL'], {}), "('\\\\((.*?)\\\\)', line, re.DOTALL)\n", (4010, 4042), False, 'import re\n'), ((5672, 5694), 'os.path.isfile', 'os.path.isfile', (['config'], {}), '(config)\n', (5686, 5694), False, 'import os\n'), ((8551, 8573), 'os.path.isfile', 'os.path.isfile', (['config'], {}), '(config)\n', (8565, 8573), False, 'import os\n'), ((14923, 14945), 'os.path.isfile', 'os.path.isfile', (['config'], {}), '(config)\n', (14937, 14945), False, 'import os\n'), ((18576, 18598), 'os.path.isfile', 'os.path.isfile', (['config'], {}), '(config)\n', (18590, 18598), False, 'import os\n'), ((19762, 19782), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (19776, 19782), False, 'import os\n'), ((22727, 22740), 'salt.utils.which', '_which', (['"""ldd"""'], {}), "('ldd')\n", (22733, 22740), True, 'from salt.utils import which as _which\n'), ((22756, 22783), 'salt.exceptions.CommandNotFoundError', 'CommandNotFoundError', (['"""ldd"""'], {}), "('ldd')\n", (22776, 22783), False, 'from salt.exceptions import CommandNotFoundError, CommandExecutionError\n'), ((935, 960), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (941, 960), False, 'import re\n'), ((3091, 3116), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (3097, 3116), False, 'import re\n'), ((3461, 3486), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (3467, 3486), False, 'import re\n'), ((3831, 3856), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (3837, 3856), False, 'import re\n'), ((4531, 4556), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (4537, 4556), False, 'import re\n'), ((4582, 4624), 're.findall', 're.findall', (['"""\\\\((.*?)\\\\)"""', 'line', 're.DOTALL'], {}), "('\\\\((.*?)\\\\)', line, re.DOTALL)\n", (4592, 4624), False, 'import re\n')] |
from django.contrib import admin
from base.models import Topic, Photo
class EONBaseAdmin(admin.ModelAdmin):
def get_changeform_initial_data(self, request):
initial = super().get_changeform_initial_data(request)
if 'add' in request.META['PATH_INFO']:
initial['created_by'] = request.user
initial['modified_by'] = request.user
return initial
def save_model(self, request, obj, form, change):
if not obj.created_by:
obj.created_by = request.user
return super().save_model(request, obj, form, change)
class TopicAdmin(EONBaseAdmin):
list_display = [
'name', 'parent_topic', 'top_level', 'modified_by', 'modified', 'created_by', 'created',
]
class PhotoAdmin(EONBaseAdmin):
# TODO Add Proper List Display
pass
admin.site.register(Topic, TopicAdmin)
admin.site.register(Photo, PhotoAdmin)
| [
"django.contrib.admin.site.register"
] | [((829, 867), 'django.contrib.admin.site.register', 'admin.site.register', (['Topic', 'TopicAdmin'], {}), '(Topic, TopicAdmin)\n', (848, 867), False, 'from django.contrib import admin\n'), ((868, 906), 'django.contrib.admin.site.register', 'admin.site.register', (['Photo', 'PhotoAdmin'], {}), '(Photo, PhotoAdmin)\n', (887, 906), False, 'from django.contrib import admin\n')] |
import traceback
from pprint import pformat
from threading import Thread
import itchat
import logging
from wxpy.chat import Chat
from wxpy.chats import Chats
from wxpy.friend import Friend
from wxpy.group import Group
from wxpy.message import MessageConfigs, Messages, Message, MessageConfig
from wxpy.mp import MP
from wxpy.response import ResponseError
from wxpy.user import User
from wxpy.utils.constants import SYSTEM
from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list
logger = logging.getLogger('wxpy')
class Robot(object):
"""
    Robot object used to log in to and operate a WeChat account; covers most Web WeChat features
"""
def __init__(
self, save_path=None, console_qr=False, qr_path=None,
qr_callback=None, login_callback=None, logout_callback=None
):
"""
        :param save_path:
            | File path used to save or load the login status, e.g. 'wxpy.pkl'; if empty, no load is attempted.
            | When set, the login status can be reloaded within a short time, avoiding repeated QR scanning; a fresh login is requested once it expires.
        :param console_qr: show the login QR code in the terminal (requires the Pillow module)
        :param qr_path: path for saving the QR code image
        :param qr_callback: callback invoked when the QR code is obtained; receives: uuid, status, qrcode
        :param login_callback: callback invoked on login; receives the same arguments as above
        :param logout_callback: callback invoked on logout; receives the same arguments as above
"""
self.core = itchat.Core()
itchat.instanceList.append(self)
self.core.auto_login(
hotReload=bool(save_path), statusStorageDir=save_path,
enableCmdQR=console_qr, picDir=qr_path, qrCallback=qr_callback,
loginCallback=login_callback, exitCallback=logout_callback
)
self.message_configs = MessageConfigs(self)
self.messages = Messages(robot=self)
self.file_helper = Chat(wrap_user_name('filehelper'))
self.file_helper.robot = self
        self.file_helper.nick_name = '文件传输助手'  # "File Transfer Helper"
self.self = Chat(self.core.loginInfo['User'])
self.self.robot = self
self.save_path = save_path
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.self.name)
@handle_response()
def logout(self):
"""
        Log out of the current account
"""
return self.core.logout()
@property
def alive(self):
"""
        Current login status
        :return: True if logged in, otherwise False
"""
return self.core.alive
@alive.setter
def alive(self, value):
self.core.alive = value
def dump_login_status(self, save_path=None):
return self.core.dump_login_status(save_path or self.save_path)
# chats
def except_self(self, chats_or_dicts):
"""
        Exclude the robot itself from a collection of chat objects or a list of user dicts
        :param chats_or_dicts: collection of chat objects or list of user dicts
        :return: the list with the robot itself excluded
"""
return list(filter(lambda x: get_user_name(x) != self.self.user_name, chats_or_dicts))
def chats(self, update=False):
"""
        Get all chat objects
        :param update: whether to refresh the data
        :return: collection of chat objects
"""
return Chats(self.friends(update) + self.groups(update) + self.mps(update), self)
def friends(self, update=False):
"""
        Get all friends
        :param update: whether to refresh the data
        :return: collection of chat objects
"""
@handle_response(Friend)
def do():
return self.core.get_friends(update=update)
ret = do()
ret.source = self
return ret
@handle_response(Group)
def groups(self, update=False, contact_only=False):
"""
        Get all group chats
        :param update: whether to refresh the data
        :param contact_only: whether to restrict to group chats saved as contacts
        :return: collection of group chats
"""
return self.core.get_chatrooms(update=update, contactOnly=contact_only)
@handle_response(MP)
def mps(self, update=False):
"""
        Get all official (MP) accounts
        :param update: whether to refresh the data
        :return: collection of chat objects
"""
return self.core.get_mps(update=update)
@handle_response(User)
def user_details(self, user_or_users, chunk_size=50):
"""
        Get detailed information (region, sex, signature, etc.) for a single user or a batch of users; not usable for group chat members
        :param user_or_users: one or more user objects or user_name values
        :param chunk_size: batch size used when splitting the request, currently 50
        :return: detailed information for the user(s)
"""
def chunks():
total = ensure_list(user_or_users)
for i in range(0, len(total), chunk_size):
yield total[i:i + chunk_size]
@handle_response()
def process_one_chunk(_chunk):
return self.core.update_friend(userName=get_user_name(_chunk))
if isinstance(user_or_users, (list, tuple)):
ret = list()
for chunk in chunks():
chunk_ret = process_one_chunk(chunk)
if isinstance(chunk_ret, list):
ret += chunk_ret
else:
ret.append(chunk_ret)
return ret
else:
return process_one_chunk(user_or_users)
def search(self, name=None, **attributes):
"""
        Search across chat objects of all types
        :param name: name (can be a nickname, remark name, etc.)
        :param attributes: attribute key/value pairs; keys can be sex, province, city, etc., e.g. province='广东'
        :return: collection of matching chat objects
"""
return self.chats().search(name, **attributes)
# add / create
@handle_response()
def add_friend(self, user, verify_content=''):
"""
        Add a user as a friend
        :param user: user object or user_name
        :param verify_content: verification message
"""
return self.core.add_friend(
userName=get_user_name(user),
status=2,
verifyContent=verify_content,
autoUpdate=True
)
@handle_response()
def accept_friend(self, user, verify_content=''):
"""
        Accept a user as a friend
        :param user: user object or user_name
        :param verify_content: verification message
        """
        # Todo: verify that this friend API is usable, and return the new friend directly when accepting
return self.core.add_friend(
userName=get_user_name(user),
status=3,
verifyContent=verify_content,
autoUpdate=True
)
def create_group(self, users, topic=None):
"""
        Create a new group chat
        :param users: list of users
        :param topic: group name
        :return: a new group chat object if creation succeeds
"""
@handle_response()
def request():
return self.core.create_chatroom(
memberList=wrap_user_name(users),
topic=topic or ''
)
ret = request()
user_name = ret.get('ChatRoomName')
if user_name:
return Group(self.core.update_chatroom(userName=user_name))
else:
raise ResponseError('Failed to create group:\n{}'.format(pformat(ret)))
# messages
def _process_message(self, msg):
"""
        Process a received message
"""
if not self.alive:
return
func, run_async = self.message_configs.get_func(msg)
if not func:
return
def process():
# noinspection PyBroadException
try:
ret = func(msg)
if ret is not None:
if isinstance(ret, (tuple, list)):
self.core.send(
msg=str(ret[0]),
toUserName=msg.chat.user_name,
mediaId=ret[1]
)
else:
self.core.send(
msg=str(ret),
toUserName=msg.chat.user_name
)
except:
logger.warning(
'An error occurred in registered function, '
'use `Robot().start(debug=True)` to show detailed information')
logger.debug(traceback.format_exc())
if run_async:
Thread(target=process).start()
else:
process()
def register(
self, chats=None, msg_types=None,
except_self=True, run_async=True, enabled=True
):
"""
        Decorator: register a message configuration
        :param chats: a single chat object/type or a list of them; matches all chats when empty
        :param msg_types: a single message type or a list of them; matches all types when empty (SYSTEM messages excepted)
        :param except_self: exclude messages sent by yourself from the phone
        :param run_async: run the configured function asynchronously for better responsiveness
        :param enabled: default enabled state of this configuration; can be switched on or off dynamically later
"""
def register(func):
self.message_configs.append(MessageConfig(
robot=self, func=func, chats=chats, msg_types=msg_types,
except_self=except_self, run_async=run_async, enabled=enabled
))
return func
return register
def start(self, block=True):
"""
        Start listening for and processing messages
        :param block: whether to block the current thread; if False, run in a new thread
"""
def listen():
logger.info('{} Auto-reply started.'.format(self))
try:
while self.alive:
msg = Message(self.core.msgList.get(), self)
if msg.type is not SYSTEM:
self.messages.append(msg)
self._process_message(msg)
except KeyboardInterrupt:
logger.info('KeyboardInterrupt received, ending...')
self.alive = False
if self.core.useHotReload:
self.dump_login_status()
logger.info('Bye.')
if block:
listen()
else:
t = Thread(target=listen, daemon=True)
t.start()
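# Minimal usage sketch (not part of the original module), assuming a cached
# login file 'wxpy.pkl'. It is wrapped in an uncalled function so that merely
# importing this module never triggers a WeChat login.
def _example_robot_usage():
    robot = Robot(save_path='wxpy.pkl')
    @robot.register(chats=Friend)
    def _auto_reply(msg):
        # Returning a value sends it back to the originating chat,
        # as implemented in Robot._process_message above.
        return 'Got your message!'
    robot.start(block=True)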
| [
"logging.getLogger",
"traceback.format_exc",
"wxpy.utils.tools.handle_response",
"wxpy.chat.Chat",
"itchat.Core",
"wxpy.message.MessageConfigs",
"wxpy.message.MessageConfig",
"pprint.pformat",
"wxpy.utils.tools.wrap_user_name",
"itchat.instanceList.append",
"wxpy.message.Messages",
"threading.Thread",
"wxpy.utils.tools.get_user_name",
"wxpy.utils.tools.ensure_list"
] | [((523, 548), 'logging.getLogger', 'logging.getLogger', (['"""wxpy"""'], {}), "('wxpy')\n", (540, 548), False, 'import logging\n'), ((1995, 2012), 'wxpy.utils.tools.handle_response', 'handle_response', ([], {}), '()\n', (2010, 2012), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((3267, 3289), 'wxpy.utils.tools.handle_response', 'handle_response', (['Group'], {}), '(Group)\n', (3282, 3289), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((3565, 3584), 'wxpy.utils.tools.handle_response', 'handle_response', (['MP'], {}), '(MP)\n', (3580, 3584), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((3765, 3786), 'wxpy.utils.tools.handle_response', 'handle_response', (['User'], {}), '(User)\n', (3780, 3786), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((5098, 5115), 'wxpy.utils.tools.handle_response', 'handle_response', ([], {}), '()\n', (5113, 5115), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((5463, 5480), 'wxpy.utils.tools.handle_response', 'handle_response', ([], {}), '()\n', (5478, 5480), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((1213, 1226), 'itchat.Core', 'itchat.Core', ([], {}), '()\n', (1224, 1226), False, 'import itchat\n'), ((1235, 1267), 'itchat.instanceList.append', 'itchat.instanceList.append', (['self'], {}), '(self)\n', (1261, 1267), False, 'import itchat\n'), ((1555, 1575), 'wxpy.message.MessageConfigs', 'MessageConfigs', (['self'], {}), '(self)\n', (1569, 1575), False, 'from wxpy.message import MessageConfigs, Messages, Message, MessageConfig\n'), ((1600, 1620), 'wxpy.message.Messages', 'Messages', ([], {'robot': 'self'}), '(robot=self)\n', (1608, 1620), False, 'from wxpy.message import MessageConfigs, Messages, Message, MessageConfig\n'), ((1789, 1822), 'wxpy.chat.Chat', 'Chat', (["self.core.loginInfo['User']"], {}), "(self.core.loginInfo['User'])\n", (1793, 1822), False, 'from wxpy.chat import Chat\n'), ((3097, 3120), 'wxpy.utils.tools.handle_response', 'handle_response', (['Friend'], {}), '(Friend)\n', (3112, 3120), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((4227, 4244), 'wxpy.utils.tools.handle_response', 'handle_response', ([], {}), '()\n', (4242, 4244), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((6055, 6072), 'wxpy.utils.tools.handle_response', 'handle_response', ([], {}), '()\n', (6070, 6072), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((1654, 1682), 'wxpy.utils.tools.wrap_user_name', 'wrap_user_name', (['"""filehelper"""'], {}), "('filehelper')\n", (1668, 1682), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((4089, 4115), 'wxpy.utils.tools.ensure_list', 'ensure_list', (['user_or_users'], {}), '(user_or_users)\n', (4100, 4115), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((9255, 9289), 'threading.Thread', 'Thread', ([], {'target': 'listen', 'daemon': '(True)'}), '(target=listen, daemon=True)\n', (9261, 9289), False, 'from threading import Thread\n'), ((5334, 5353), 'wxpy.utils.tools.get_user_name', 'get_user_name', (['user'], {}), '(user)\n', 
(5347, 5353), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((5745, 5764), 'wxpy.utils.tools.get_user_name', 'get_user_name', (['user'], {}), '(user)\n', (5758, 5764), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((8216, 8353), 'wxpy.message.MessageConfig', 'MessageConfig', ([], {'robot': 'self', 'func': 'func', 'chats': 'chats', 'msg_types': 'msg_types', 'except_self': 'except_self', 'run_async': 'run_async', 'enabled': 'enabled'}), '(robot=self, func=func, chats=chats, msg_types=msg_types,\n except_self=except_self, run_async=run_async, enabled=enabled)\n', (8229, 8353), False, 'from wxpy.message import MessageConfigs, Messages, Message, MessageConfig\n'), ((4336, 4357), 'wxpy.utils.tools.get_user_name', 'get_user_name', (['_chunk'], {}), '(_chunk)\n', (4349, 4357), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((6169, 6190), 'wxpy.utils.tools.wrap_user_name', 'wrap_user_name', (['users'], {}), '(users)\n', (6183, 6190), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((6486, 6498), 'pprint.pformat', 'pformat', (['ret'], {}), '(ret)\n', (6493, 6498), False, 'from pprint import pformat\n'), ((7648, 7670), 'threading.Thread', 'Thread', ([], {'target': 'process'}), '(target=process)\n', (7654, 7670), False, 'from threading import Thread\n'), ((2679, 2695), 'wxpy.utils.tools.get_user_name', 'get_user_name', (['x'], {}), '(x)\n', (2692, 2695), False, 'from wxpy.utils.tools import handle_response, get_user_name, wrap_user_name, ensure_list\n'), ((7589, 7611), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7609, 7611), False, 'import traceback\n')] |
from algovision import app
if __name__ == "__main__":
    app.run(debug=True, host='0.0.0.0')
| [
"algovision.app.run"
] | [((58, 93), 'algovision.app.run', 'app.run', ([], {'debug': '(True)', 'host': '"""0.0.0.0"""'}), "(debug=True, host='0.0.0.0')\n", (65, 93), False, 'from algovision import app\n')] |
"""Test the search module"""
from collections.abc import Iterable, Sized
from io import StringIO
from itertools import chain, product
from functools import partial
import pickle
import sys
from types import GeneratorType
import re
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils.fixes import sp_version
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import fit_grid_point
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier:
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert len(X) == len(Y)
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert list(grid) == [grid[i] for i in range(len(grid))]
@pytest.mark.parametrize("klass", [ParameterGrid,
partial(ParameterSampler, n_iter=10)])
@pytest.mark.parametrize(
"input, error_type, error_message",
[(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'),
([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'),
({'foo': 0}, TypeError, "Parameter.* value is not iterable .*"
r"\(key='foo', value=0\)")]
)
def test_validate_parameter_input(klass, input, error_type, error_message):
with pytest.raises(error_type, match=error_message):
klass(input)
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert isinstance(grid1, Iterable)
assert isinstance(grid1, Sized)
assert len(grid1) == 3
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert len(grid2) == 6
# loop to assert we can iterate over the grid multiple times
for i in range(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert (points ==
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert len(empty) == 1
assert list(empty) == [{}]
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert len(has_empty) == 4
assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}]
assert_grid_iter_equals_getitem(has_empty)
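# Illustrative sketch (not part of the original test module): ParameterGrid
# expands a dict of value lists into the full cross-product of candidate
# settings, so params2 above corresponds to the six dicts below (the test
# checks the set of candidates, not their order).
_example_grid2_candidates = [
    {'bar': 'ham', 'foo': 4}, {'bar': 'ham', 'foo': 2},
    {'bar': 'spam', 'foo': 4}, {'bar': 'spam', 'foo': 2},
    {'bar': 'eggs', 'foo': 4}, {'bar': 'eggs', 'foo': 2},
]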
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert grid_search.best_estimator_.foo_param == 2
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def test_grid_search_pipeline_steps():
# check that parameters that are estimators are cloned before fitting
pipe = Pipeline([('regressor', LinearRegression())])
param_grid = {'regressor': [LinearRegression(), Ridge()]}
grid_search = GridSearchCV(pipe, param_grid, cv=2)
grid_search.fit(X, y)
regressor_results = grid_search.cv_results_['param_regressor']
assert isinstance(regressor_results[0], LinearRegression)
assert isinstance(regressor_results[1], Ridge)
assert not hasattr(regressor_results[0], 'coef_')
assert not hasattr(regressor_results[1], 'coef_')
assert regressor_results[0] is not grid_search.best_estimator_
assert regressor_results[1] is not grid_search.best_estimator_
# check that we didn't modify the parameter grid that was passed
assert not hasattr(param_grid['regressor'][0], 'coef_')
assert not hasattr(param_grid['regressor'][1], 'coef_')
@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
def test_SearchCV_with_fit_params(SearchCV):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = SearchCV(
clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise"
)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen."
with pytest.raises(AssertionError, match=err_msg):
searcher.fit(X, y, spam=np.ones(10))
err_msg = "Fit parameter spam has length 1; expected"
with pytest.raises(AssertionError, match=err_msg):
searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert grid_search_no_score.best_params_ == grid_search.best_params_
# check that we can call score and that it gives the correct result
assert grid_search.score(X, y) == grid_search_no_score.score(X, y)
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc'
).fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert score_auc < 1.0
assert score_accuracy < 1.0
assert score_auc != score_accuracy
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2),
GroupKFold(n_splits=3), GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert not hasattr(grid_search, 'classes_')
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert not hasattr(grid_search, 'classes_')
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert not hasattr(grid_search, 'classes_')
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3)
grid_search.fit(X, y)
assert hasattr(grid_search, "cv_results_")
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3)
random_search.fit(X, y)
assert hasattr(grid_search, "cv_results_")
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
for scoring in [None, ['accuracy', 'precision']]:
grid_search = GridSearchCV(
clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3
)
grid_search.fit(X, y)
assert not hasattr(grid_search, "best_estimator_") and \
hasattr(grid_search, "best_index_") and \
hasattr(grid_search, "best_params_")
# Make sure the functions predict/transform etc raise meaningful
# error messages
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters'
% fn_name), getattr(grid_search, fn_name), X)
# Test that an invalid refit param raises appropriate error messages
for refit in ["", 5, True, 'recall', 'accuracy']:
assert_raise_message(ValueError, "For multi-metric scoring, the "
"parameter refit must be set to a scorer key",
GridSearchCV(clf, {}, refit=refit,
scoring={'acc': 'accuracy',
'prec': 'precision'}
).fit,
X, y)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC(gamma='auto')
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3)
grid_search.fit(X, y)
assert grid_search.best_estimator_.foo_param == 2
def test_grid_search_bad_param_grid():
param_dict = {"C": 1}
clf = SVC(gamma='auto')
assert_raise_message(
ValueError,
"Parameter grid for parameter (C) needs to"
" be a list or numpy array, but got (<class 'int'>)."
" Single values need to be wrapped in a list"
" with one element.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC(gamma='auto')
assert_raise_message(
ValueError,
"Parameter grid for parameter (C) needs to"
" be a list or numpy array, but got (<class 'str'>)."
" Single values need to be wrapped in a list"
" with one element.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones((3, 2))}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert np.mean(y_pred == y_pred2) >= .9
assert C == C2
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert C == C2
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert C == C3
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert cv.best_score_ >= 0
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert np.mean(y_pred == y_test) >= 0
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert not hasattr(self, 'has_been_fit_')
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_refit_callable():
"""
Test refit=callable, which adds flexibility in identifying the
"best" estimator.
"""
def refit_callable(cv_results):
"""
A dummy function tests `refit=callable` interface.
Return the index of a model that has the least
`mean_test_score`.
"""
# Fit a dummy clf with `refit=True` to get a list of keys in
# clf.cv_results_.
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring='precision', refit=True)
clf.fit(X, y)
# Ensure that `best_index_ != 0` for this dummy clf
assert clf.best_index_ != 0
# Assert every key matches those in `cv_results`
for key in clf.cv_results_.keys():
assert key in cv_results
return cv_results['mean_test_score'].argmin()
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring='precision', refit=refit_callable)
clf.fit(X, y)
assert clf.best_index_ == 0
# Ensure `best_score_` is disabled when using `refit=callable`
assert not hasattr(clf, 'best_score_')
def test_refit_callable_invalid_type():
"""
Test implementation catches the errors when 'best_index_' returns an
invalid result.
"""
def refit_callable_invalid_type(cv_results):
"""
A dummy function tests when returned 'best_index_' is not integer.
"""
return None
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]},
scoring='precision', refit=refit_callable_invalid_type)
with pytest.raises(TypeError,
match='best_index_ returned is not an integer'):
clf.fit(X, y)
@pytest.mark.parametrize('out_bound_value', [-1, 2])
@pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV])
def test_refit_callable_out_bound(out_bound_value, search_cv):
"""
Test implementation catches the errors when 'best_index_' returns an
out of bound result.
"""
def refit_callable_out_bound(cv_results):
"""
A dummy function tests when returned 'best_index_' is out of bounds.
"""
return out_bound_value
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]},
scoring='precision', refit=refit_callable_out_bound)
with pytest.raises(IndexError, match='best_index_ index out of range'):
clf.fit(X, y)
def test_refit_callable_multi_metric():
"""
Test refit=callable in multiple metric evaluation setting
"""
def refit_callable(cv_results):
"""
A dummy function tests `refit=callable` interface.
Return the index of a model that has the least
`mean_test_prec`.
"""
assert 'mean_test_prec' in cv_results
return cv_results['mean_test_prec'].argmin()
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'}
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring=scoring, refit=refit_callable)
clf.fit(X, y)
assert clf.best_index_ == 0
# Ensure `best_score_` is disabled when using `refit=callable`
assert not hasattr(clf, 'best_score_')
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(
check_X=check_X, check_y=check_y, methods_to_check=["fit"],
)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert hasattr(grid_search, "cv_results_")
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(
check_X=lambda x: isinstance(x, list), methods_to_check=["fit"],
)
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert hasattr(grid_search, "cv_results_")
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(
check_y=lambda x: isinstance(x, list), methods_to_check=["fit"],
)
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert hasattr(grid_search, "cv_results_")
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
def check_df(x):
return isinstance(x, InputFeatureType)
def check_series(x):
return isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert hasattr(grid_search, "cv_results_")
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(n_samples=50, random_state=0)
km = KMeans(random_state=0, init="random", n_init=1)
# Multi-metric evaluation unsupervised
scoring = ['adjusted_rand_score', 'fowlkes_mallows_score']
for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']:
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring=scoring, refit=refit)
grid_search.fit(X, y)
# Both ARI and FMS can find the right number :)
assert grid_search.best_params_["n_clusters"] == 3
# Single metric evaluation unsupervised
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
assert grid_search.best_params_["n_clusters"] == 3
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert grid_search.best_params_["n_clusters"] == 4
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert search.best_params_['bandwidth'] == .1
assert search.best_score_ == 42
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert len(samples) == 10
for sample in samples:
assert sample["kernel"] in ["rbf", "linear"]
assert 0 <= sample["C"] <= 1
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert [x for x in sampler] == [x for x in sampler]
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert [x for x in sampler] == [x for x in sampler]
def check_cv_results_array_types(search, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
cv_results = search.cv_results_
assert all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys)
assert all(cv_results[key].dtype == object for key in param_keys)
assert not any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys)
assert all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank'))
scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']
for key in scorer_keys:
assert cv_results['rank_test_%s' % key].dtype == np.int32
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys)
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
search = GridSearchCV(SVC(), cv=n_splits, param_grid=params,
return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert all(cv_results['rank_test_score'] >= 1)
assert (all(cv_results[k] >= 0) for k in score_keys
if k != 'rank_test_score')
assert (all(cv_results[k] <= 1) for k in score_keys
if 'time' not in k and
k != 'rank_test_score')
# Check cv_results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = search.cv_results_
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear')
assert all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf')
def test_random_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
params = [{'kernel': ['rbf'], 'C': expon(scale=10),
'gamma': expon(scale=0.1)},
{'kernel': ['poly'], 'degree': [2, 3]}]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits,
param_distributions=params,
return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear')
assert all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf')
@pytest.mark.parametrize(
"SearchCV, specialized_params",
[(GridSearchCV, {'param_grid': {'C': [1, 10]}}),
(RandomizedSearchCV,
{'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]
)
def test_search_default_iid(SearchCV, specialized_params):
    # Historically this exercised the `iid` parameter; it now checks that the
    # per-split test scores are combined with an unweighted mean/std.
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
common_params = {'estimator': SVC(), 'cv': cv,
'return_train_score': True}
search = SearchCV(**common_params, **specialized_params)
search.fit(X, y)
test_cv_scores = np.array(
[search.cv_results_['split%d_test_score' % s][0]
for s in range(search.n_splits_)]
)
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(
[search.cv_results_['split%d_train_score' % s][0]
for s in range(search.n_splits_)]
)
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert search.cv_results_['param_C'][0] == 1
# scores are the same as above
assert_allclose(test_cv_scores, [1, 1. / 3.])
assert_allclose(train_cv_scores, [1, 1])
# Unweighted mean/std is used
assert test_mean == pytest.approx(np.mean(test_cv_scores))
assert test_std == pytest.approx(np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert train_mean == pytest.approx(1)
assert train_std == pytest.approx(0)
def test_grid_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_searches = []
for scoring in ({'accuracy': make_scorer(accuracy_score),
'recall': make_scorer(recall_score)},
'accuracy', 'recall'):
grid_search = GridSearchCV(SVC(), cv=n_splits,
param_grid=params,
scoring=scoring, refit=False)
grid_search.fit(X, y)
grid_searches.append(grid_search)
compare_cv_results_multimetric_with_single(*grid_searches)
def test_random_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
# Scipy 0.12's stats dists do not accept seed, hence we use param grid
params = dict(C=np.logspace(-4, 1, 3),
gamma=np.logspace(-5, 0, 3, base=0.1))
for refit in (True, False):
random_searches = []
for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'):
# If True, for multi-metric pass refit='accuracy'
if refit:
probability = True
refit = 'accuracy' if isinstance(scoring, tuple) else refit
else:
probability = False
clf = SVC(probability=probability, random_state=42)
random_search = RandomizedSearchCV(clf, n_iter=n_search_iter,
cv=n_splits,
param_distributions=params,
scoring=scoring,
refit=refit, random_state=0)
random_search.fit(X, y)
random_searches.append(random_search)
compare_cv_results_multimetric_with_single(*random_searches)
compare_refit_methods_when_refit_with_acc(
random_searches[0], random_searches[1], refit)
def compare_cv_results_multimetric_with_single(
search_multi, search_acc, search_rec):
"""Compare multi-metric cv_results with the ensemble of multiple
single metric cv_results from single metric grid/random search"""
assert search_multi.multimetric_
assert_array_equal(sorted(search_multi.scorer_),
('accuracy', 'recall'))
cv_results_multi = search_multi.cv_results_
cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v
for k, v in search_acc.cv_results_.items()}
cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v
for k, v in search_rec.cv_results_.items()})
# Check if score and timing are reasonable, also checks if the keys
# are present
assert all((np.all(cv_results_multi[k] <= 1) for k in (
'mean_score_time', 'std_score_time', 'mean_fit_time',
'std_fit_time')))
# Compare the keys, other than time keys, among multi-metric and
# single metric grid search results. np.testing.assert_equal performs a
# deep nested comparison of the two cv_results dicts
np.testing.assert_equal({k: v for k, v in cv_results_multi.items()
if not k.endswith('_time')},
{k: v for k, v in cv_results_acc_rec.items()
if not k.endswith('_time')})
def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):
"""Compare refit multi-metric search methods with single metric methods"""
assert search_acc.refit == refit
if refit:
assert search_multi.refit == 'accuracy'
else:
assert not search_multi.refit
return # search cannot predict/score without refit
X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
for method in ('predict', 'predict_proba', 'predict_log_proba'):
assert_almost_equal(getattr(search_multi, method)(X),
getattr(search_acc, method)(X))
assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))
for key in ('best_index_', 'best_score_', 'best_params_'):
assert getattr(search_multi, key) == getattr(search_acc, key)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid,
return_train_score=True)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid,
return_train_score=True)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
assert not np.allclose(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
assert not np.allclose(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold()
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv,
).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
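    # Check that the fit/score timings in cv_results_ and the refit_time_
    # attribute are non-negative and small for this trivial problem.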
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
            # NOTE: The precision of time.time on Windows is not high enough
            # for the fit/score times to be non-zero for trivial X and y.
assert np.all(search.cv_results_[key] >= 0)
assert np.all(search.cv_results_[key] < 1)
for key in ['mean_score_time', 'std_score_time']:
assert search.cv_results_[key][1] >= 0
assert search.cv_results_[key][0] == 0.0
assert np.all(search.cv_results_[key] < 1)
assert hasattr(search, "refit_time_")
assert isinstance(search.refit_time_, float)
assert search.refit_time_ >= 0
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert all(np.in1d(expected_keys, result_keys))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
# FIXME remove test_fit_grid_point as the function will be removed in 0.25
@ignore_warnings(category=FutureWarning)
def test_fit_grid_point():
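    # Check that fit_grid_point returns the scorer's test-split score along
    # with the parameters and the test set size, and rejects multimetric
    # scorers.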
X, y = make_classification(random_state=0)
cv = StratifiedKFold()
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}):
for train, test in cv.split(X, y):
this_scores, this_params, n_test_samples = fit_grid_point(
X, y, clone(svc), params, train, test,
scorer, verbose=False)
est = clone(svc).set_params(**params)
est.fit(X[train], y[train])
expected_score = scorer(est, X[test], y[test])
# Test the return values of fit_grid_point
assert_almost_equal(this_scores, expected_score)
assert params == this_params
assert n_test_samples == test.size
# Should raise an error upon multimetric scorer
assert_raise_message(ValueError, "For evaluating multiple scores, use "
"sklearn.model_selection.cross_validate instead.",
fit_grid_point, X, y, svc, params, train, test,
{'score': scorer}, verbose=True)
# FIXME remove test_fit_grid_point_deprecated as
# fit_grid_point will be removed in 0.25
def test_fit_grid_point_deprecated():
X, y = make_classification(random_state=0)
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
msg = ("fit_grid_point is deprecated in version 0.23 "
"and will be removed in version 0.25")
params = {'C': 0.1}
train, test = next(StratifiedKFold().split(X, y))
with pytest.warns(FutureWarning, match=msg):
fit_grid_point(X, y, svc, params, train, test, scorer, verbose=False)
def test_pickle():
    # Test that a fitted search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3, cv=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold()
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert not hasattr(gs, "predict_proba")
def test_grid_search_allows_nans():
# Test GridSearchCV with SimpleImputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', SimpleImputer(strategy='mean', missing_values=np.nan)),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def score(self, X=None, Y=None):
return 0.
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
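    # Repeat with error_score=NaN and check that the failing fits get NaN
    # scores instead of zeros.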
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
ranks = gs.cv_results_['rank_test_score']
# Check that succeeded estimators have lower ranks
assert ranks[0] <= 2 and ranks[1] <= 2
# Check that failed estimator has the highest rank
assert ranks[clf.FAILING_PARAMETER] == 3
assert gs.best_index_ != clf.FAILING_PARAMETER
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
    # Check that a warning is raised if n_iter is bigger than the total
    # parameter space
params = [{'first': [0, 1], 'second': ['a', 'b', 'c']},
{'third': ['two', 'values']}]
sampler = ParameterSampler(params, n_iter=9)
n_iter = 9
grid_size = 8
expected_warning = ('The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For '
'exhaustive searches, use GridSearchCV.'
% (grid_size, n_iter, grid_size))
assert_warns_message(UserWarning, expected_warning,
list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=8)
samples = list(sampler)
assert len(samples) == 8
for values in ParameterGrid(params):
assert values in samples
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert len(samples) == 99
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert len(set(hashable_samples)) == 99
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert len(samples) == 7
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid, cv=3)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert not hasattr(clf, "predict_proba")
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid, cv=3)
assert not hasattr(clf, "predict_proba")
clf.fit(X, y)
assert not hasattr(clf, "predict_proba")
def test_search_train_scores_set_to_false():
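    # Smoke test: fitting works when train scores are not requested
    # (the default return_train_score=False).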
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]}, cv=3)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
    # Check if a one-time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples),
return_train_score=True)
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits), return_train_score=True)
gs2.fit(X, y)
# Give generator as a cv parameter
assert isinstance(KFold(n_splits=n_splits,
shuffle=True, random_state=0).split(X, y),
GeneratorType)
gs3 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0).split(X, y),
return_train_score=True)
gs3.fit(X, y)
gs4 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0), return_train_score=True)
gs4.fit(X, y)
def _pop_time_keys(cv_results):
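        # Drop the timing keys, which are not expected to be identical
        # across otherwise equivalent searches.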
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
# Check if generators are supported as cv and
# that the splits are consistent
np.testing.assert_equal(_pop_time_keys(gs3.cv_results_),
_pop_time_keys(gs4.cv_results_))
    # OneTimeSplitter is a non-re-entrant cv where split can be called only
    # once. If ``cv.split`` is called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd parameters will not be evaluated as
    # no train/test indices will be generated for the 2nd and subsequent
    # cv.split calls. This is a check to make sure cv.split is not called
    # once per param setting.
np.testing.assert_equal({k: v for k, v in gs.cv_results_.items()
if not k.endswith('_time')},
{k: v for k, v in gs2.cv_results_.items()
if not k.endswith('_time')})
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True),
return_train_score=True)
gs.fit(X, y)
    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are the same, the test and train scores must also be
    # the same as long as the same train/test indices are generated for all
    # the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
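    # Check that transform followed by inverse_transform, both delegated to
    # the best estimator, gives back the original X.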
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
def test_custom_run_search():
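    # Check that a BaseSearchCV subclass implementing only _run_search
    # produces the same cv_results_ and attributes as GridSearchCV.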
def check_results(results, gscv):
exp_results = gscv.cv_results_
assert sorted(results.keys()) == sorted(exp_results)
for k in results:
if not k.endswith('_time'):
# XXX: results['params'] is a list :|
results[k] = np.asanyarray(results[k])
if results[k].dtype.kind == 'O':
assert_array_equal(exp_results[k], results[k],
err_msg='Checking ' + k)
else:
assert_allclose(exp_results[k], results[k],
err_msg='Checking ' + k)
def fit_grid(param_grid):
return GridSearchCV(clf, param_grid,
return_train_score=True).fit(X, y)
class CustomSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
def _run_search(self, evaluate):
results = evaluate([{'max_depth': 1}, {'max_depth': 2}])
check_results(results, fit_grid({'max_depth': [1, 2]}))
results = evaluate([{'min_samples_split': 5},
{'min_samples_split': 10}])
check_results(results, fit_grid([{'max_depth': [1, 2]},
{'min_samples_split': [5, 10]}]))
# Using regressor to make sure each score differs
clf = DecisionTreeRegressor(random_state=0)
X, y = make_classification(n_samples=100, n_informative=4,
random_state=0)
mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y)
gscv = fit_grid([{'max_depth': [1, 2]},
{'min_samples_split': [5, 10]}])
results = mycv.cv_results_
check_results(results, gscv)
for attr in dir(gscv):
if (attr[0].islower() and attr[-1:] == '_' and
attr not in {'cv_results_', 'best_estimator_',
'refit_time_', 'classes_'}):
assert getattr(gscv, attr) == getattr(mycv, attr), \
"Attribute %s not equal" % attr
def test__custom_fit_no_run_search():
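    # A subclass that overrides fit() entirely does not need _run_search,
    # while one relying on the default fit() must implement it.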
class NoRunSearchSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
def fit(self, X, y=None, groups=None, **fit_params):
return self
# this should not raise any exceptions
NoRunSearchSearchCV(SVC()).fit(X, y)
class BadSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
with pytest.raises(NotImplementedError,
match="_run_search not implemented."):
# this should raise a NotImplementedError
BadSearchCV(SVC()).fit(X, y)
def test_empty_cv_iterator_error():
# Use global X, y
# create cv
cv = KFold(n_splits=3).split(X)
    # exhaust the iterator so the fit below raises the expected ValueError
[u for u in cv]
# cv is empty now
train_size = 100
ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
cv=cv, n_jobs=4)
# assert that this raises an error
with pytest.raises(ValueError,
match='No fits were performed. '
'Was the CV iterator empty\\? '
'Were there no candidates\\?'):
ridge.fit(X[:train_size], y[:train_size])
def test_random_search_bad_cv():
# Use global X, y
class BrokenKFold(KFold):
def get_n_splits(self, *args, **kw):
return 1
# create bad cv
cv = BrokenKFold(n_splits=3)
train_size = 100
ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
cv=cv, n_jobs=4)
# assert that this raises an error
with pytest.raises(ValueError,
match='cv.split and cv.get_n_splits returned '
'inconsistent results. Expected \\d+ '
'splits, got \\d+'):
ridge.fit(X[:train_size], y[:train_size])
def test_n_features_in():
# make sure grid search and random search delegate n_features_in to the
# best estimator
n_features = 4
X, y = make_classification(n_features=n_features)
gbdt = HistGradientBoostingClassifier()
param_grid = {'max_iter': [3, 4]}
gs = GridSearchCV(gbdt, param_grid)
rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1)
assert not hasattr(gs, 'n_features_in_')
assert not hasattr(rs, 'n_features_in_')
gs.fit(X, y)
rs.fit(X, y)
assert gs.n_features_in_ == n_features
assert rs.n_features_in_ == n_features
def test_search_cv__pairwise_property_delegated_to_base_estimator():
"""
    Test that the implementation of BaseSearchCV has a _pairwise property
    which matches the _pairwise property of its estimator.
    This test makes sure _pairwise is delegated to the base estimator.
Non-regression test for issue #13920.
"""
est = BaseEstimator()
attr_message = "BaseSearchCV _pairwise property must match estimator"
for _pairwise_setting in [True, False]:
setattr(est, '_pairwise', _pairwise_setting)
cv = GridSearchCV(est, {'n_neighbors': [10]})
assert _pairwise_setting == cv._pairwise, attr_message
def test_search_cv__pairwise_property_equivalence_of_precomputed():
"""
    Test that the implementation of BaseSearchCV has a _pairwise property
    which matches the _pairwise property of its estimator.
    This test ensures that predictions are equivalent when a 'precomputed'
    metric is used.
Non-regression test for issue #13920.
"""
n_samples = 50
n_splits = 2
X, y = make_classification(n_samples=n_samples, random_state=0)
grid_params = {'n_neighbors': [10]}
# defaults to euclidean metric (minkowski p = 2)
clf = KNeighborsClassifier()
cv = GridSearchCV(clf, grid_params, cv=n_splits)
cv.fit(X, y)
preds_original = cv.predict(X)
# precompute euclidean metric to validate _pairwise is working
X_precomputed = euclidean_distances(X)
clf = KNeighborsClassifier(metric='precomputed')
cv = GridSearchCV(clf, grid_params, cv=n_splits)
cv.fit(X_precomputed, y)
preds_precomputed = cv.predict(X_precomputed)
attr_message = "GridSearchCV not identical with precomputed metric"
assert (preds_original == preds_precomputed).all(), attr_message
@pytest.mark.parametrize(
"SearchCV, param_search",
[(GridSearchCV, {'a': [0.1, 0.01]}),
(RandomizedSearchCV, {'a': uniform(1, 3)})]
)
def test_scalar_fit_param(SearchCV, param_search):
# unofficially sanctioned tolerance for scalar values in fit_params
# non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15805
class TestEstimator(BaseEstimator, ClassifierMixin):
def __init__(self, a=None):
self.a = a
def fit(self, X, y, r=None):
self.r_ = r
def predict(self, X):
return np.zeros(shape=(len(X)))
model = SearchCV(TestEstimator(), param_search)
X, y = make_classification(random_state=42)
model.fit(X, y, r=42)
assert model.best_estimator_.r_ == 42
@pytest.mark.parametrize(
"SearchCV, param_search",
[(GridSearchCV, {'alpha': [0.1, 0.01]}),
(RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})]
)
def test_scalar_fit_param_compat(SearchCV, param_search):
    # Check support for scalar values in fit_params, as used for instance by
    # LightGBM, which does not exactly respect the scikit-learn API contract
    # but which we do not want to break without an explicit deprecation cycle
    # and API recommendations for implementing early stopping with a
    # user-provided validation set. Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/15805
X_train, X_valid, y_train, y_valid = train_test_split(
*make_classification(random_state=42), random_state=42
)
class _FitParamClassifier(SGDClassifier):
def fit(self, X, y, sample_weight=None, tuple_of_arrays=None,
scalar_param=None, callable_param=None):
super().fit(X, y, sample_weight=sample_weight)
assert scalar_param > 0
assert callable(callable_param)
# The tuple of arrays should be preserved as tuple.
assert isinstance(tuple_of_arrays, tuple)
assert tuple_of_arrays[0].ndim == 2
assert tuple_of_arrays[1].ndim == 1
return self
def _fit_param_callable():
pass
model = SearchCV(
_FitParamClassifier(), param_search
)
# NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which
# is not the case for the following parameters. But this abuse is common in
# popular third-party libraries and we should tolerate this behavior for
# now and be careful not to break support for those without following
# proper deprecation cycle.
fit_params = {
'tuple_of_arrays': (X_valid, y_valid),
'callable_param': _fit_param_callable,
'scalar_param': 42,
}
model.fit(X_train, y_train, **fit_params)
| [
"sklearn.utils._testing.assert_warns_message",
"sklearn.model_selection.GridSearchCV",
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.utils._testing.assert_raises",
"sklearn.tree.DecisionTreeRegressor",
"pickle.dumps",
"sklearn.utils._testing.assert_array_equal",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.asanyarray",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sklearn.metrics.roc_auc_score",
"scipy.stats.expon",
"sklearn.model_selection.KFold",
"sklearn.utils._mocking.CheckingClassifier",
"numpy.random.RandomState",
"numpy.arange",
"sklearn.model_selection.ParameterGrid",
"numpy.mean",
"sklearn.ensemble.HistGradientBoostingClassifier",
"sklearn.linear_model.SGDClassifier",
"sklearn.model_selection.tests.common.OneTimeSplitter",
"numpy.where",
"sklearn.datasets.make_blobs",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.model_selection.ParameterSampler",
"sklearn.neighbors.KernelDensity",
"itertools.product",
"numpy.dot",
"sklearn.model_selection.LeavePGroupsOut",
"sklearn.model_selection.GroupKFold",
"scipy.sparse.csr_matrix",
"io.StringIO",
"numpy.logspace",
"numpy.allclose",
"numpy.ones",
"sklearn.metrics.pairwise.euclidean_distances",
"sklearn.model_selection.LeaveOneGroupOut",
"sklearn.model_selection.GroupShuffleSplit",
"numpy.in1d",
"sklearn.svm.LinearSVC",
"scipy.stats.uniform",
"sklearn.utils._testing.assert_allclose",
"pytest.raises",
"sklearn.utils._testing.ignore_warnings",
"numpy.std",
"re.sub",
"sklearn.base.BaseEstimator",
"sklearn.linear_model.LinearRegression",
"sklearn.svm.SVC",
"sklearn.datasets.make_classification",
"sklearn.cluster.KMeans",
"pytest.approx",
"scipy.stats.bernoulli",
"sklearn.metrics.f1_score",
"numpy.unique",
"sklearn.utils._testing.assert_almost_equal",
"sklearn.datasets.make_multilabel_classification",
"sklearn.utils._testing.assert_warns",
"sklearn.model_selection.fit_grid_point",
"sklearn.base.clone",
"sklearn.linear_model.Ridge",
"sklearn.metrics.make_scorer",
"sklearn.utils._testing.assert_array_almost_equal",
"pytest.mark.parametrize",
"numpy.zeros",
"functools.partial",
"sklearn.impute.SimpleImputer",
"numpy.all",
"sklearn.utils._testing.assert_raise_message",
"pytest.warns",
"sklearn.model_selection.RandomizedSearchCV"
] | [((4079, 4125), 'numpy.array', 'np.array', (['[[-1, -1], [-2, -1], [1, 1], [2, 1]]'], {}), '([[-1, -1], [-2, -1], [1, 1], [2, 1]])\n', (4087, 4125), True, 'import numpy as np\n'), ((4130, 4152), 'numpy.array', 'np.array', (['[1, 1, 2, 2]'], {}), '([1, 1, 2, 2])\n', (4138, 4152), True, 'import numpy as np\n'), ((4385, 4686), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input, error_type, error_message"""', '[(0, TypeError, \'Parameter .* is not a dict or a list \\\\(0\\\\)\'), ([{\'foo\':\n [0]}, 0], TypeError, \'Parameter .* is not a dict \\\\(0\\\\)\'), ({\'foo\': 0},\n TypeError, "Parameter.* value is not iterable .*\\\\(key=\'foo\', value=0\\\\)")]'], {}), '(\'input, error_type, error_message\', [(0, TypeError,\n \'Parameter .* is not a dict or a list \\\\(0\\\\)\'), ([{\'foo\': [0]}, 0],\n TypeError, \'Parameter .* is not a dict \\\\(0\\\\)\'), ({\'foo\': 0},\n TypeError, "Parameter.* value is not iterable .*\\\\(key=\'foo\', value=0\\\\)")]\n )\n', (4408, 4686), False, 'import pytest\n'), ((7934, 8005), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""SearchCV"""', '[GridSearchCV, RandomizedSearchCV]'], {}), "('SearchCV', [GridSearchCV, RandomizedSearchCV])\n", (7957, 8005), False, 'import pytest\n'), ((22950, 23001), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""out_bound_value"""', '[-1, 2]'], {}), "('out_bound_value', [-1, 2])\n", (22973, 23001), False, 'import pytest\n'), ((23003, 23075), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""search_cv"""', '[RandomizedSearchCV, GridSearchCV]'], {}), "('search_cv', [RandomizedSearchCV, GridSearchCV])\n", (23026, 23075), False, 'import pytest\n'), ((35001, 35192), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""SearchCV, specialized_params"""', "[(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {\n 'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]"], {}), "('SearchCV, specialized_params', [(GridSearchCV, {\n 'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {\n 'param_distributions': {'C': [1, 10]}, 'n_iter': 2})])\n", (35024, 35192), False, 'import pytest\n'), ((43521, 43538), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {}), '()\n', (43536, 43538), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((46129, 46168), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'FutureWarning'}), '(category=FutureWarning)\n', (46144, 46168), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((4971, 4993), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['params1'], {}), '(params1)\n', (4984, 4993), False, 'from sklearn.model_selection import ParameterGrid\n'), ((5229, 5251), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['params2'], {}), '(params2)\n', (5242, 5251), False, 'from sklearn.model_selection import ParameterGrid\n'), ((5793, 5810), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['{}'], {}), '({})\n', (5806, 5810), False, 'from sklearn.model_selection import ParameterGrid\n'), ((5916, 5960), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['IndexError', '(lambda : empty[1])'], {}), '(IndexError, lambda : empty[1])\n', (5929, 5960), False, 'from sklearn.utils._testing import assert_raises\n'), ((5977, 6026), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (["[{'C': [1, 10]}, {}, {'C': [0.5]}]"], {}), "([{'C': [1, 10]}, {}, {'C': [0.5]}])\n", (5990, 6026), False, 'from sklearn.model_selection import ParameterGrid\n'), 
((6316, 6376), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'cv': '(3)', 'verbose': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)\n", (6328, 6376), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6488, 6498), 'io.StringIO', 'StringIO', ([], {}), '()\n', (6496, 6498), False, 'from io import StringIO\n'), ((6612, 6690), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (["grid_search.cv_results_['param_foo_param'].data", '[1, 2, 3]'], {}), "(grid_search.cv_results_['param_foo_param'].data, [1, 2, 3])\n", (6630, 6690), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((6956, 7004), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'grid_search.fit', 'X', 'y'], {}), '(ValueError, grid_search.fit, X, y)\n', (6969, 7004), False, 'from sklearn.utils._testing import assert_raises\n'), ((7257, 7293), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipe', 'param_grid'], {'cv': '(2)'}), '(pipe, param_grid, cv=2)\n', (7269, 7293), False, 'from sklearn.model_selection import GridSearchCV\n'), ((8098, 8125), 'numpy.array', 'np.array', (['([0] * 5 + [1] * 5)'], {}), '([0] * 5 + [1] * 5)\n', (8106, 8125), True, 'import numpy as np\n'), ((8136, 8192), 'sklearn.utils._mocking.CheckingClassifier', 'CheckingClassifier', ([], {'expected_fit_params': "['spam', 'eggs']"}), "(expected_fit_params=['spam', 'eggs'])\n", (8154, 8192), False, 'from sklearn.utils._mocking import CheckingClassifier, MockDataFrame\n'), ((8935, 8960), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (8944, 8960), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((8972, 9009), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'random_state': '(0)', 'centers': '(2)'}), '(random_state=0, centers=2)\n', (8982, 9009), False, 'from sklearn.datasets import make_blobs\n'), ((9101, 9149), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': Cs}"], {'scoring': '"""accuracy"""'}), "(clf, {'C': Cs}, scoring='accuracy')\n", (9113, 9149), False, 'from sklearn.model_selection import GridSearchCV\n'), ((9204, 9261), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf_no_score', "{'C': Cs}"], {'scoring': '"""accuracy"""'}), "(clf_no_score, {'C': Cs}, scoring='accuracy')\n", (9216, 9261), False, 'from sklearn.model_selection import GridSearchCV\n'), ((9698, 9735), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf_no_score', "{'C': Cs}"], {}), "(clf_no_score, {'C': Cs})\n", (9710, 9735), False, 'from sklearn.model_selection import GridSearchCV\n'), ((9740, 9818), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['TypeError', '"""no scoring"""', 'grid_search_no_score.fit', '[[1]]'], {}), "(TypeError, 'no scoring', grid_search_no_score.fit, [[1]])\n", (9760, 9818), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((9894, 9969), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_classes': '(2)', 'flip_y': '(0.2)', 'random_state': '(0)'}), '(n_samples=100, n_classes=2, flip_y=0.2, random_state=0)\n', (9913, 9969), False, 'from sklearn.datasets import make_classification\n'), ((10010, 10035), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (10019, 10035), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((10953, 11006), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', 
(['score_accuracy', 'score_no_scoring'], {}), '(score_accuracy, score_no_scoring)\n', (10972, 11006), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((11011, 11061), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['score_auc', 'score_no_score_auc'], {}), '(score_auc, score_no_score_auc)\n', (11030, 11061), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((11248, 11272), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (11269, 11272), True, 'import numpy as np\n'), ((11285, 11347), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(15)', 'n_classes': '(2)', 'random_state': '(0)'}), '(n_samples=15, n_classes=2, random_state=0)\n', (11304, 11347), False, 'from sklearn.datasets import make_classification\n'), ((11394, 11419), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (11403, 11419), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((12169, 12196), 'numpy.array', 'np.array', (['([0] * 5 + [1] * 5)'], {}), '([0] * 5 + [1] * 5)\n', (12177, 12196), True, 'import numpy as np\n'), ((12318, 12396), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['grid_search.best_estimator_.classes_', 'grid_search.classes_'], {}), '(grid_search.best_estimator_.classes_, grid_search.classes_)\n', (12336, 12396), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((13212, 13255), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1]}"], {'cv': '(3)'}), "(clf, {'foo_param': [1]}, cv=3)\n", (13224, 13255), False, 'from sklearn.model_selection import GridSearchCV\n'), ((13350, 13409), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['clf', "{'foo_param': [0]}"], {'n_iter': '(1)', 'cv': '(3)'}), "(clf, {'foo_param': [0]}, n_iter=1, cv=3)\n", (13368, 13409), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((15168, 15234), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(200)', 'n_features': '(100)', 'random_state': '(0)'}), '(n_samples=200, n_features=100, random_state=0)\n', (15187, 15234), False, 'from sklearn.datasets import make_classification\n'), ((15246, 15257), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (15255, 15257), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((15267, 15303), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {}), "(clf, {'C': [0.1, 1.0]})\n", (15279, 15303), False, 'from sklearn.model_selection import GridSearchCV\n'), ((15308, 15355), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'cv.fit', 'X_[:180]', 'y_'], {}), '(ValueError, cv.fit, X_[:180], y_)\n', (15321, 15355), False, 'from sklearn.utils._testing import assert_raises\n'), ((15410, 15476), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(200)', 'n_features': '(100)', 'random_state': '(0)'}), '(n_samples=200, n_features=100, random_state=0)\n', (15429, 15476), False, 'from sklearn.datasets import make_classification\n'), ((15553, 15570), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (15556, 15570), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((15580, 15609), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'param_dict'], {}), '(clf, param_dict)\n', (15592, 15609), False, 'from sklearn.model_selection import GridSearchCV\n'), ((15640, 15675), 
'sklearn.svm.SVC', 'SVC', ([], {'C': '(1.0)', 'kernel': '"""rbf"""', 'gamma': '(0.1)'}), "(C=1.0, kernel='rbf', gamma=0.1)\n", (15643, 15675), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((15701, 15766), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['clf.dual_coef_', 'cv.best_estimator_.dual_coef_'], {}), '(clf.dual_coef_, cv.best_estimator_.dual_coef_)\n', (15719, 15766), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((16175, 16192), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (16178, 16192), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((16197, 16426), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['ValueError', '"""Parameter grid for parameter (C) needs to be a list or numpy array, but got (<class \'int\'>). Single values need to be wrapped in a list with one element."""', 'GridSearchCV', 'clf', 'param_dict'], {}), '(ValueError,\n "Parameter grid for parameter (C) needs to be a list or numpy array, but got (<class \'int\'>). Single values need to be wrapped in a list with one element."\n , GridSearchCV, clf, param_dict)\n', (16217, 16426), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((16514, 16519), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (16517, 16519), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((16524, 16666), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['ValueError', '"""Parameter values for parameter (C) need to be a non-empty sequence."""', 'GridSearchCV', 'clf', 'param_dict'], {}), "(ValueError,\n 'Parameter values for parameter (C) need to be a non-empty sequence.',\n GridSearchCV, clf, param_dict)\n", (16544, 16666), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((16727, 16744), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (16730, 16744), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((16749, 16978), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['ValueError', '"""Parameter grid for parameter (C) needs to be a list or numpy array, but got (<class \'str\'>). Single values need to be wrapped in a list with one element."""', 'GridSearchCV', 'clf', 'param_dict'], {}), '(ValueError,\n "Parameter grid for parameter (C) needs to be a list or numpy array, but got (<class \'str\'>). 
Single values need to be wrapped in a list with one element."\n , GridSearchCV, clf, param_dict)\n', (16769, 16978), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((17079, 17084), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (17082, 17084), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((17089, 17145), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'GridSearchCV', 'clf', 'param_dict'], {}), '(ValueError, GridSearchCV, clf, param_dict)\n', (17102, 17145), False, 'from sklearn.utils._testing import assert_raises\n'), ((17262, 17328), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(200)', 'n_features': '(100)', 'random_state': '(0)'}), '(n_samples=200, n_features=100, random_state=0)\n', (17281, 17328), False, 'from sklearn.datasets import make_classification\n'), ((17340, 17351), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (17349, 17351), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((17361, 17397), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {}), "(clf, {'C': [0.1, 1.0]})\n", (17373, 17397), False, 'from sklearn.model_selection import GridSearchCV\n'), ((17502, 17519), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_'], {}), '(X_)\n', (17515, 17519), True, 'import scipy.sparse as sp\n'), ((17530, 17541), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (17539, 17541), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((17551, 17587), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {}), "(clf, {'C': [0.1, 1.0]})\n", (17563, 17587), False, 'from sklearn.model_selection import GridSearchCV\n'), ((17810, 17876), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(200)', 'n_features': '(100)', 'random_state': '(0)'}), '(n_samples=200, n_features=100, random_state=0)\n', (17829, 17876), False, 'from sklearn.datasets import make_classification\n'), ((17888, 17899), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (17897, 17899), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((17909, 17959), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {'scoring': '"""f1"""'}), "(clf, {'C': [0.1, 1.0]}, scoring='f1')\n", (17921, 17959), False, 'from sklearn.model_selection import GridSearchCV\n'), ((18064, 18081), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_'], {}), '(X_)\n', (18077, 18081), True, 'import scipy.sparse as sp\n'), ((18092, 18103), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (18101, 18103), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((18113, 18163), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {'scoring': '"""f1"""'}), "(clf, {'C': [0.1, 1.0]}, scoring='f1')\n", (18125, 18163), False, 'from sklearn.model_selection import GridSearchCV\n'), ((18265, 18300), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['y_pred', 'y_pred2'], {}), '(y_pred, y_pred2)\n', (18283, 18300), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((18614, 18659), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_loss'], {'greater_is_better': '(False)'}), '(f1_loss, greater_is_better=False)\n', (18625, 18659), False, 'from sklearn.metrics import make_scorer\n'), ((18669, 18721), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {'scoring': 'F1Loss'}), "(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)\n", (18681, 
18721), False, 'from sklearn.model_selection import GridSearchCV\n'), ((18842, 18877), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['y_pred', 'y_pred3'], {}), '(y_pred, y_pred3)\n', (18860, 18877), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((19053, 19119), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(200)', 'n_features': '(100)', 'random_state': '(0)'}), '(n_samples=200, n_features=100, random_state=0)\n', (19072, 19119), False, 'from sklearn.datasets import make_classification\n'), ((19211, 19239), 'numpy.dot', 'np.dot', (['X_[:180]', 'X_[:180].T'], {}), '(X_[:180], X_[:180].T)\n', (19217, 19239), True, 'import numpy as np\n'), ((19274, 19299), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""precomputed"""'}), "(kernel='precomputed')\n", (19277, 19299), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((19309, 19345), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {}), "(clf, {'C': [0.1, 1.0]})\n", (19321, 19345), False, 'from sklearn.model_selection import GridSearchCV\n'), ((19458, 19486), 'numpy.dot', 'np.dot', (['X_[180:]', 'X_[:180].T'], {}), '(X_[180:], X_[:180].T)\n', (19464, 19486), True, 'import numpy as np\n'), ((19919, 19937), 'numpy.zeros', 'np.zeros', (['(10, 20)'], {}), '((10, 20))\n', (19927, 19937), True, 'import numpy as np\n'), ((19952, 19966), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (19959, 19966), True, 'import numpy as np\n'), ((19978, 20003), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""precomputed"""'}), "(kernel='precomputed')\n", (19981, 20003), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((20013, 20049), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': [0.1, 1.0]}"], {}), "(clf, {'C': [0.1, 1.0]})\n", (20025, 20049), False, 'from sklearn.model_selection import GridSearchCV\n'), ((20054, 20105), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'cv.fit', 'K_train', 'y_train'], {}), '(ValueError, cv.fit, K_train, y_train)\n', (20067, 20105), False, 'from sklearn.utils._testing import assert_raises\n'), ((20666, 20693), 'numpy.array', 'np.array', (['([0] * 5 + [1] * 5)'], {}), '([0] * 5 + [1] * 5)\n', (20674, 20693), True, 'import numpy as np\n'), ((21845, 21910), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (21864, 21910), False, 'from sklearn.datasets import make_classification\n'), ((22574, 22639), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (22593, 22639), False, 'from sklearn.datasets import make_classification\n'), ((23443, 23508), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (23462, 23508), False, 'from sklearn.datasets import make_classification\n'), ((24208, 24273), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (24227, 24273), False, 'from sklearn.datasets import make_classification\n'), ((24961, 25039), 'sklearn.utils._mocking.CheckingClassifier', 'CheckingClassifier', ([], {'check_X': 
'check_X', 'check_y': 'check_y', 'methods_to_check': "['fit']"}), "(check_X=check_X, check_y=check_y, methods_to_check=['fit'])\n", (24979, 25039), False, 'from sklearn.utils._mocking import CheckingClassifier, MockDataFrame\n'), ((25073, 25116), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {}), "(clf, {'foo_param': [1, 2, 3]})\n", (25085, 25116), False, 'from sklearn.model_selection import GridSearchCV\n'), ((25316, 25343), 'numpy.array', 'np.array', (['([0] * 5 + [1] * 5)'], {}), '([0] * 5 + [1] * 5)\n', (25324, 25343), True, 'import numpy as np\n'), ((25463, 25480), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (25468, 25480), False, 'from sklearn.model_selection import KFold\n'), ((25499, 25549), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'cv': 'cv'}), "(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n", (25511, 25549), False, 'from sklearn.model_selection import GridSearchCV\n'), ((25752, 25779), 'numpy.array', 'np.array', (['([0] * 5 + [1] * 5)'], {}), '([0] * 5 + [1] * 5)\n', (25760, 25779), True, 'import numpy as np\n'), ((25899, 25916), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (25904, 25916), False, 'from sklearn.model_selection import KFold\n'), ((25935, 25985), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'cv': 'cv'}), "(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n", (25947, 25985), False, 'from sklearn.model_selection import GridSearchCV\n'), ((26411, 26438), 'numpy.array', 'np.array', (['([0] * 5 + [1] * 5)'], {}), '([0] * 5 + [1] * 5)\n', (26419, 26438), True, 'import numpy as np\n'), ((27111, 27151), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(50)', 'random_state': '(0)'}), '(n_samples=50, random_state=0)\n', (27121, 27151), False, 'from sklearn.datasets import make_blobs\n'), ((27161, 27208), 'sklearn.cluster.KMeans', 'KMeans', ([], {'random_state': '(0)', 'init': '"""random"""', 'n_init': '(1)'}), "(random_state=0, init='random', n_init=1)\n", (27167, 27208), False, 'from sklearn.cluster import KMeans\n'), ((28370, 28447), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'cluster_std': '(0.1)', 'random_state': '(1)', 'centers': '[[0, 1], [1, 0], [0, 0]]'}), '(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]])\n', (28380, 28447), False, 'from sklearn.datasets import make_blobs\n'), ((28923, 29011), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', ([], {'param_distributions': 'param_distributions', 'n_iter': '(10)', 'random_state': '(0)'}), '(param_distributions=param_distributions, n_iter=10,\n random_state=0)\n', (28939, 29011), False, 'from sklearn.model_selection import ParameterSampler\n'), ((29362, 29449), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', ([], {'param_distributions': 'param_distributions', 'n_iter': '(3)', 'random_state': '(0)'}), '(param_distributions=param_distributions, n_iter=3,\n random_state=0)\n', (29378, 29449), False, 'from sklearn.model_selection import ParameterSampler\n'), ((30970, 31034), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(50)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=50, n_features=4, random_state=42)\n', (30989, 31034), False, 'from sklearn.datasets import make_classification\n'), ((33149, 33213), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 
'(50)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=50, n_features=4, random_state=42)\n', (33168, 33213), False, 'from sklearn.datasets import make_classification\n'), ((35383, 35501), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'centers': '[[0, 0], [1, 0], [0, 1], [1, 1]]', 'random_state': '(0)', 'cluster_std': '(0.1)', 'shuffle': '(False)', 'n_samples': '(80)'}), '(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,\n cluster_std=0.1, shuffle=False, n_samples=80)\n', (35393, 35501), False, 'from sklearn.datasets import make_blobs\n'), ((35651, 35685), 'numpy.ones', 'np.ones', (['X.shape[0]'], {'dtype': 'np.bool'}), '(X.shape[0], dtype=np.bool)\n', (35658, 35685), True, 'import numpy as np\n'), ((36704, 36751), 'sklearn.utils._testing.assert_allclose', 'assert_allclose', (['test_cv_scores', '[1, 1.0 / 3.0]'], {}), '(test_cv_scores, [1, 1.0 / 3.0])\n', (36719, 36751), False, 'from sklearn.utils._testing import assert_allclose\n'), ((36754, 36794), 'sklearn.utils._testing.assert_allclose', 'assert_allclose', (['train_cv_scores', '[1, 1]'], {}), '(train_cv_scores, [1, 1])\n', (36769, 36794), False, 'from sklearn.utils._testing import assert_allclose\n'), ((37192, 37256), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(50)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=50, n_features=4, random_state=42)\n', (37211, 37256), False, 'from sklearn.datasets import make_classification\n'), ((37954, 38018), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(50)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=50, n_features=4, random_state=42)\n', (37973, 38018), False, 'from sklearn.datasets import make_classification\n'), ((41100, 41156), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (41110, 41156), False, 'from sklearn.datasets import make_blobs\n'), ((41616, 41657), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(50)', 'random_state': '(42)'}), '(n_samples=50, random_state=42)\n', (41626, 41657), False, 'from sklearn.datasets import make_blobs\n'), ((43260, 43267), 'sklearn.model_selection.KFold', 'KFold', ([], {}), '()\n', (43265, 43267), False, 'from sklearn.model_selection import KFold\n'), ((43578, 43603), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (43587, 43603), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((43673, 43726), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svc', "{'C': [0, 1]}"], {'cv': '(2)', 'error_score': '(0)'}), "(svc, {'C': [0, 1]}, cv=2, error_score=0)\n", (43685, 43726), False, 'from sklearn.model_selection import GridSearchCV\n'), ((43736, 43805), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['svc', "{'C': [0, 1]}"], {'cv': '(2)', 'error_score': '(0)', 'n_iter': '(2)'}), "(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)\n", (43754, 43805), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((44646, 44671), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (44655, 44671), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((44683, 44720), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'random_state': '(0)', 'centers': '(2)'}), '(random_state=0, centers=2)\n', (44693, 44720), False, 'from sklearn.datasets import make_blobs\n'), ((46207, 46242), 
'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(0)'}), '(random_state=0)\n', (46226, 46242), False, 'from sklearn.datasets import make_classification\n'), ((46252, 46269), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (46267, 46269), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((46280, 46305), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (46289, 46305), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((46319, 46346), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (46330, 46346), False, 'from sklearn.metrics import make_scorer\n'), ((47027, 47240), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['ValueError', '"""For evaluating multiple scores, use sklearn.model_selection.cross_validate instead."""', 'fit_grid_point', 'X', 'y', 'svc', 'params', 'train', 'test', "{'score': scorer}"], {'verbose': '(True)'}), "(ValueError,\n 'For evaluating multiple scores, use sklearn.model_selection.cross_validate instead.'\n , fit_grid_point, X, y, svc, params, train, test, {'score': scorer},\n verbose=True)\n", (47047, 47240), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((47447, 47482), 'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(0)'}), '(random_state=0)\n', (47466, 47482), False, 'from sklearn.datasets import make_classification\n'), ((47493, 47518), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (47502, 47518), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((47532, 47559), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (47543, 47559), False, 'from sklearn.metrics import make_scorer\n'), ((47985, 48046), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'refit': '(True)', 'cv': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3)\n", (47997, 48046), False, 'from sklearn.model_selection import GridSearchCV\n'), ((48276, 48353), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'refit': '(True)', 'n_iter': '(3)', 'cv': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, refit=True, n_iter=3, cv=3)\n", (48294, 48353), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((48717, 48786), 'sklearn.datasets.make_multilabel_classification', 'make_multilabel_classification', ([], {'return_indicator': '(True)', 'random_state': '(0)'}), '(return_indicator=True, random_state=0)\n', (48747, 48786), False, 'from sklearn.datasets import make_multilabel_classification\n'), ((48888, 48895), 'sklearn.model_selection.KFold', 'KFold', ([], {}), '()\n', (48893, 48895), False, 'from sklearn.model_selection import KFold\n'), ((50533, 50555), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(False)'}), '(probability=False)\n', (50536, 50555), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((51707, 51771), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(20)', 'n_features': '(10)', 'random_state': '(0)'}), '(n_samples=20, n_features=10, random_state=0)\n', (51726, 51771), False, 'from sklearn.datasets import make_classification\n'), ((52133, 52233), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "[{'parameter': [0, 1, 2]}]"], {'scoring': '"""accuracy"""', 'refit': '(False)', 
'error_score': '(0.0)'}), "(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=\n False, error_score=0.0)\n", (52145, 52233), False, 'from sklearn.model_selection import GridSearchCV\n'), ((52255, 52299), 'sklearn.utils._testing.assert_warns', 'assert_warns', (['FitFailedWarning', 'gs.fit', 'X', 'y'], {}), '(FitFailedWarning, gs.fit, X, y)\n', (52267, 52299), False, 'from sklearn.utils._testing import assert_warns\n'), ((52983, 53027), 'sklearn.utils._testing.assert_warns', 'assert_warns', (['FitFailedWarning', 'gs.fit', 'X', 'y'], {}), '(FitFailedWarning, gs.fit, X, y)\n', (52995, 53027), False, 'from sklearn.utils._testing import assert_warns\n'), ((53719, 53783), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(20)', 'n_features': '(10)', 'random_state': '(0)'}), '(n_samples=20, n_features=10, random_state=0)\n', (53738, 53783), False, 'from sklearn.datasets import make_classification\n'), ((53905, 54009), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "[{'parameter': [0, 1, 2]}]"], {'scoring': '"""accuracy"""', 'refit': '(False)', 'error_score': '"""raise"""'}), "(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=\n False, error_score='raise')\n", (53917, 54009), False, 'from sklearn.model_selection import GridSearchCV\n'), ((54105, 54144), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'gs.fit', 'X', 'y'], {}), '(ValueError, gs.fit, X, y)\n', (54118, 54144), False, 'from sklearn.utils._testing import assert_raises\n'), ((54375, 54409), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params'], {'n_iter': '(9)'}), '(params, n_iter=9)\n', (54391, 54409), False, 'from sklearn.model_selection import ParameterSampler\n'), ((54711, 54777), 'sklearn.utils._testing.assert_warns_message', 'assert_warns_message', (['UserWarning', 'expected_warning', 'list', 'sampler'], {}), '(UserWarning, expected_warning, list, sampler)\n', (54731, 54777), False, 'from sklearn.utils._testing import assert_warns_message\n'), ((54884, 54918), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params'], {'n_iter': '(8)'}), '(params, n_iter=8)\n', (54900, 54918), False, 'from sklearn.model_selection import ParameterSampler\n'), ((54994, 55015), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['params'], {}), '(params)\n', (55007, 55015), False, 'from sklearn.model_selection import ParameterGrid\n'), ((55183, 55235), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params'], {'n_iter': '(99)', 'random_state': '(42)'}), '(params, n_iter=99, random_state=42)\n', (55199, 55235), False, 'from sklearn.model_selection import ParameterSampler\n'), ((55573, 55620), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params_distribution'], {'n_iter': '(7)'}), '(params_distribution, n_iter=7)\n', (55589, 55620), False, 'from sklearn.model_selection import ParameterSampler\n'), ((56777, 56802), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (56786, 56802), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((56813, 56866), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf'], {'param_grid': "{'C': [0.1, 0.2]}", 'cv': '(3)'}), "(clf, param_grid={'C': [0.1, 0.2]}, cv=3)\n", (56825, 56866), False, 'from sklearn.model_selection import GridSearchCV\n'), ((57046, 57102), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'n_samples', 'random_state': '(0)'}), 
'(n_samples=n_samples, random_state=0)\n', (57065, 57102), False, 'from sklearn.datasets import make_classification\n'), ((60560, 60620), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'cv': '(3)', 'verbose': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)\n", (60572, 60620), False, 'from sklearn.model_selection import GridSearchCV\n'), ((60727, 60762), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['X', 'X_round_trip'], {}), '(X, X_round_trip)\n', (60745, 60762), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((62223, 62260), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (62244, 62260), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((62272, 62339), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_informative': '(4)', 'random_state': '(0)'}), '(n_samples=100, n_informative=4, random_state=0)\n', (62291, 62339), False, 'from sklearn.datasets import make_classification\n'), ((65082, 65124), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_features': 'n_features'}), '(n_features=n_features)\n', (65101, 65124), False, 'from sklearn.datasets import make_classification\n'), ((65136, 65168), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {}), '()\n', (65166, 65168), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n'), ((65216, 65246), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['gbdt', 'param_grid'], {}), '(gbdt, param_grid)\n', (65228, 65246), False, 'from sklearn.model_selection import GridSearchCV\n'), ((65256, 65302), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['gbdt', 'param_grid'], {'n_iter': '(1)'}), '(gbdt, param_grid, n_iter=1)\n', (65274, 65302), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((65849, 65864), 'sklearn.base.BaseEstimator', 'BaseEstimator', ([], {}), '()\n', (65862, 65864), False, 'from sklearn.base import BaseEstimator, ClassifierMixin\n'), ((66512, 66568), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'n_samples', 'random_state': '(0)'}), '(n_samples=n_samples, random_state=0)\n', (66531, 66568), False, 'from sklearn.datasets import make_classification\n'), ((66673, 66695), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (66693, 66695), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((66705, 66748), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid_params'], {'cv': 'n_splits'}), '(clf, grid_params, cv=n_splits)\n', (66717, 66748), False, 'from sklearn.model_selection import GridSearchCV\n'), ((66889, 66911), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['X'], {}), '(X)\n', (66908, 66911), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((66922, 66964), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'metric': '"""precomputed"""'}), "(metric='precomputed')\n", (66942, 66964), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((66974, 67017), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid_params'], {'cv': 'n_splits'}), '(clf, grid_params, cv=n_splits)\n', (66986, 67017), False, 'from sklearn.model_selection import GridSearchCV\n'), ((67924, 67960), 
'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(42)'}), '(random_state=42)\n', (67943, 67960), False, 'from sklearn.datasets import make_classification\n'), ((3263, 3275), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (3272, 3275), True, 'import numpy as np\n'), ((4781, 4827), 'pytest.raises', 'pytest.raises', (['error_type'], {'match': 'error_message'}), '(error_type, match=error_message)\n', (4794, 4827), False, 'import pytest\n'), ((4345, 4381), 'functools.partial', 'partial', (['ParameterSampler'], {'n_iter': '(10)'}), '(ParameterSampler, n_iter=10)\n', (4352, 4381), False, 'from functools import partial\n'), ((8480, 8524), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': 'err_msg'}), '(AssertionError, match=err_msg)\n', (8493, 8524), False, 'import pytest\n'), ((8639, 8683), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': 'err_msg'}), '(AssertionError, match=err_msg)\n', (8652, 8683), False, 'import pytest\n'), ((11460, 11478), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (11476, 11478), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((11480, 11498), 'sklearn.model_selection.LeavePGroupsOut', 'LeavePGroupsOut', (['(2)'], {}), '(2)\n', (11495, 11498), False, 'from sklearn.model_selection import LeavePGroupsOut\n'), ((11517, 11539), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (11527, 11539), False, 'from sklearn.model_selection import GroupKFold\n'), ((11541, 11560), 'sklearn.model_selection.GroupShuffleSplit', 'GroupShuffleSplit', ([], {}), '()\n', (11558, 11560), False, 'from sklearn.model_selection import GroupShuffleSplit\n'), ((11600, 11630), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'cv': 'cv'}), '(clf, grid, cv=cv)\n', (11612, 11630), False, 'from sklearn.model_selection import GridSearchCV\n'), ((11639, 11735), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['ValueError', '"""The \'groups\' parameter should not be None."""', 'gs.fit', 'X', 'y'], {}), '(ValueError,\n "The \'groups\' parameter should not be None.", gs.fit, X, y)\n', (11659, 11735), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((11848, 11865), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (11863, 11865), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((11867, 11891), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {}), '()\n', (11889, 11891), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((11935, 11965), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'cv': 'cv'}), '(clf, grid, cv=cv)\n', (11947, 11965), False, 'from sklearn.model_selection import GridSearchCV\n'), ((12250, 12275), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12259, 12275), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((12512, 12519), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (12517, 12519), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((12726, 12751), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12735, 12751), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((12920, 12945), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12929, 12945), False, 'from 
sklearn.svm import LinearSVC, SVC\n'), ((13688, 13750), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'refit': '(False)', 'cv': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3)\n", (13700, 13750), False, 'from sklearn.model_selection import GridSearchCV\n'), ((17052, 17067), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (17059, 17067), True, 'import numpy as np\n'), ((17704, 17730), 'numpy.mean', 'np.mean', (['(y_pred == y_pred2)'], {}), '(y_pred == y_pred2)\n', (17711, 17730), True, 'import numpy as np\n'), ((19554, 19579), 'numpy.mean', 'np.mean', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (19561, 19579), True, 'import numpy as np\n'), ((20428, 20448), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (20436, 20448), True, 'import numpy as np\n'), ((21283, 21348), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (21302, 21348), False, 'from sklearn.datasets import make_classification\n'), ((21965, 21991), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (21974, 21991), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((22695, 22721), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (22704, 22721), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((22828, 22900), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""best_index_ returned is not an integer"""'}), "(TypeError, match='best_index_ returned is not an integer')\n", (22841, 22900), False, 'import pytest\n'), ((23561, 23587), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (23570, 23587), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((23688, 23753), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""best_index_ index out of range"""'}), "(IndexError, match='best_index_ index out of range')\n", (23701, 23753), False, 'import pytest\n'), ((24332, 24359), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (24343, 24359), False, 'from sklearn.metrics import make_scorer\n'), ((24405, 24431), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (24414, 24431), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((26743, 26801), 'sklearn.utils._mocking.CheckingClassifier', 'CheckingClassifier', ([], {'check_X': 'check_df', 'check_y': 'check_series'}), '(check_X=check_df, check_y=check_series)\n', (26761, 26801), False, 'from sklearn.utils._mocking import CheckingClassifier, MockDataFrame\n'), ((26825, 26868), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {}), "(clf, {'foo_param': [1, 2, 3]})\n", (26837, 26868), False, 'from sklearn.model_selection import GridSearchCV\n'), ((28495, 28510), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (28508, 28510), False, 'from sklearn.neighbors import KernelDensity\n'), ((28894, 28907), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (28901, 28907), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((29633, 29721), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', ([], {'param_distributions': 'param_distributions', 'n_iter': '(10)', 'random_state': '(0)'}), '(param_distributions=param_distributions, 
n_iter=10,\n random_state=0)\n', (29649, 29721), False, 'from sklearn.model_selection import ParameterSampler\n'), ((31818, 31823), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (31821, 31823), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((34001, 34006), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (34004, 34006), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((35961, 35966), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (35964, 35966), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((37074, 37090), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (37087, 37090), False, 'import pytest\n'), ((37115, 37131), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (37128, 37131), False, 'import pytest\n'), ((39745, 39778), 're.sub', 're.sub', (['"""_score$"""', '"""_accuracy"""', 'k'], {}), "('_score$', '_accuracy', k)\n", (39751, 39778), False, 'import re\n'), ((41854, 41859), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (41857, 41859), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((41979, 41984), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (41982, 41984), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((42391, 42483), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (["cv_results['mean_test_score'][0]", "cv_results['mean_test_score'][1]"], {}), "(cv_results['mean_test_score'][0], cv_results[\n 'mean_test_score'][1])\n", (42410, 42483), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((42515, 42609), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (["cv_results['mean_train_score'][0]", "cv_results['mean_train_score'][1]"], {}), "(cv_results['mean_train_score'][0], cv_results[\n 'mean_train_score'][1])\n", (42534, 42609), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((42966, 43035), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (["search.cv_results_['rank_test_score']", '[1, 1, 3]'], {}), "(search.cv_results_['rank_test_score'], [1, 1, 3])\n", (42985, 43035), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((43151, 43174), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (43172, 43174), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((43176, 43200), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (43198, 43200), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((43414, 43490), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (["grid_search.cv_results_['param_random_state']", '[0, None]'], {}), "(grid_search.cv_results_['param_random_state'], [0, None])\n", (43432, 43490), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((44800, 44856), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': Cs}"], {'scoring': 'score', 'cv': 'n_splits'}), "(clf, {'C': Cs}, scoring=score, cv=n_splits)\n", (44812, 44856), False, 'from sklearn.model_selection import GridSearchCV\n'), ((45243, 45277), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (45258, 45277), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((47757, 47795), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {'match': 'msg'}), '(FutureWarning, match=msg)\n', (47769, 47795), False, 'import pytest\n'), ((47805, 47874), 'sklearn.model_selection.fit_grid_point', 'fit_grid_point', (['X', 'y', 'svc', 'params', 'train', 'test', 'scorer'], {'verbose': 
'(False)'}), '(X, y, svc, params, train, test, scorer, verbose=False)\n', (47819, 47874), False, 'from sklearn.model_selection import fit_grid_point\n'), ((48112, 48137), 'pickle.dumps', 'pickle.dumps', (['grid_search'], {}), '(grid_search)\n', (48124, 48137), False, 'import pickle\n'), ((48462, 48489), 'pickle.dumps', 'pickle.dumps', (['random_search'], {}), '(random_search)\n', (48474, 48489), False, 'import pickle\n'), ((48915, 48952), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (48936, 48952), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((48972, 49010), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (48994, 49010), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((49093, 49133), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['est', 'est_parameters'], {'cv': 'cv'}), '(est, est_parameters, cv=cv)\n', (49105, 49133), False, 'from sklearn.model_selection import GridSearchCV\n'), ((49721, 49777), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['est', 'est_parameters'], {'cv': 'cv', 'n_iter': '(3)'}), '(est, est_parameters, cv=cv, n_iter=3)\n', (49739, 49777), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((51455, 51475), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (51463, 51475), True, 'import numpy as np\n'), ((55517, 55531), 'scipy.stats.bernoulli', 'bernoulli', (['(0.5)'], {}), '(0.5)\n', (55526, 55531), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((57126, 57151), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (57135, 57151), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((57421, 57446), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (57430, 57446), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((57819, 57844), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (57828, 57844), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((58120, 58145), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (58129, 58145), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((59502, 59527), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (59511, 59527), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((60250, 60317), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['per_param_scores[0]', 'per_param_scores[1]'], {}), '(per_param_scores[0], per_param_scores[1])\n', (60275, 60317), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((60360, 60427), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['per_param_scores[2]', 'per_param_scores[3]'], {}), '(per_param_scores[2], per_param_scores[3])\n', (60385, 60427), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((63418, 63490), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""_run_search not implemented."""'}), "(NotImplementedError, match='_run_search not implemented.')\n", (63431, 63490), False, 'import pytest\n'), ((63874, 63881), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (63879, 63881), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((64011, 64136), 'pytest.raises', 
'pytest.raises', (['ValueError'], {'match': '"""No fits were performed. Was the CV iterator empty\\\\? Were there no candidates\\\\?"""'}), "(ValueError, match=\n 'No fits were performed. Was the CV iterator empty\\\\? Were there no candidates\\\\?'\n )\n", (64024, 64136), False, 'import pytest\n'), ((64526, 64533), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (64531, 64533), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((64663, 64798), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""cv.split and cv.get_n_splits returned inconsistent results. Expected \\\\d+ splits, got \\\\d+"""'}), "(ValueError, match=\n 'cv.split and cv.get_n_splits returned inconsistent results. Expected \\\\d+ splits, got \\\\d+'\n )\n", (64676, 64798), False, 'import pytest\n'), ((66050, 66090), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['est', "{'n_neighbors': [10]}"], {}), "(est, {'n_neighbors': [10]})\n", (66062, 66090), False, 'from sklearn.model_selection import GridSearchCV\n'), ((7209, 7227), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7225, 7227), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((7229, 7236), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (7234, 7236), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((8059, 8073), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (8068, 8073), True, 'import numpy as np\n'), ((8776, 8787), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (8783, 8787), True, 'import numpy as np\n'), ((8794, 8806), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (8802, 8806), True, 'import numpy as np\n'), ((10084, 10121), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'scoring': 'None'}), '(clf, grid, scoring=None)\n', (10096, 10121), False, 'from sklearn.model_selection import GridSearchCV\n'), ((10154, 10197), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'scoring': '"""accuracy"""'}), "(clf, grid, scoring='accuracy')\n", (10166, 10197), False, 'from sklearn.model_selection import GridSearchCV\n'), ((10419, 10461), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'scoring': '"""roc_auc"""'}), "(clf, grid, scoring='roc_auc')\n", (10431, 10461), False, 'from sklearn.model_selection import GridSearchCV\n'), ((12130, 12144), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (12139, 12144), True, 'import numpy as np\n'), ((18574, 18600), 'sklearn.metrics.f1_score', 'f1_score', (['y_true_', 'y_pred_'], {}), '(y_true_, y_pred_)\n', (18582, 18600), False, 'from sklearn.metrics import f1_score\n'), ((20627, 20641), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (20636, 20641), True, 'import numpy as np\n'), ((21411, 21437), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (21420, 21437), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((24755, 24780), 'numpy.arange', 'np.arange', (['(10 * 5 * 3 * 2)'], {}), '(10 * 5 * 3 * 2)\n', (24764, 24780), True, 'import numpy as np\n'), ((24813, 24835), 'numpy.arange', 'np.arange', (['(10 * 7 * 11)'], {}), '(10 * 7 * 11)\n', (24822, 24835), True, 'import numpy as np\n'), ((25277, 25291), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (25286, 25291), True, 'import numpy as np\n'), ((25713, 25727), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (25722, 25727), True, 
'import numpy as np\n'), ((26372, 26386), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (26381, 26386), True, 'import numpy as np\n'), ((29600, 29613), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (29607, 29613), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((33295, 33310), 'scipy.stats.expon', 'expon', ([], {'scale': '(10)'}), '(scale=10)\n', (33300, 33310), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((33336, 33352), 'scipy.stats.expon', 'expon', ([], {'scale': '(0.1)'}), '(scale=0.1)\n', (33341, 33352), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((36867, 36890), 'numpy.mean', 'np.mean', (['test_cv_scores'], {}), '(test_cv_scores)\n', (36874, 36890), True, 'import numpy as np\n'), ((36929, 36951), 'numpy.std', 'np.std', (['test_cv_scores'], {}), '(test_cv_scores)\n', (36935, 36951), True, 'import numpy as np\n'), ((37451, 37478), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (37462, 37478), False, 'from sklearn.metrics import make_scorer\n'), ((37511, 37536), 'sklearn.metrics.make_scorer', 'make_scorer', (['recall_score'], {}), '(recall_score)\n', (37522, 37536), False, 'from sklearn.metrics import make_scorer\n'), ((37617, 37622), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (37620, 37622), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((38156, 38177), 'numpy.logspace', 'np.logspace', (['(-4)', '(1)', '(3)'], {}), '(-4, 1, 3)\n', (38167, 38177), True, 'import numpy as np\n'), ((38203, 38234), 'numpy.logspace', 'np.logspace', (['(-5)', '(0)', '(3)'], {'base': '(0.1)'}), '(-5, 0, 3, base=0.1)\n', (38214, 38234), True, 'import numpy as np\n'), ((38635, 38680), 'sklearn.svm.SVC', 'SVC', ([], {'probability': 'probability', 'random_state': '(42)'}), '(probability=probability, random_state=42)\n', (38638, 38680), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((38709, 38845), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['clf'], {'n_iter': 'n_search_iter', 'cv': 'n_splits', 'param_distributions': 'params', 'scoring': 'scoring', 'refit': 'refit', 'random_state': '(0)'}), '(clf, n_iter=n_search_iter, cv=n_splits,\n param_distributions=params, scoring=scoring, refit=refit, random_state=0)\n', (38727, 38845), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((39883, 39914), 're.sub', 're.sub', (['"""_score$"""', '"""_recall"""', 'k'], {}), "('_score$', '_recall', k)\n", (39889, 39914), False, 'import re\n'), ((40101, 40133), 'numpy.all', 'np.all', (['(cv_results_multi[k] <= 1)'], {}), '(cv_results_multi[k] <= 1)\n', (40107, 40133), True, 'import numpy as np\n'), ((42652, 42731), 'numpy.allclose', 'np.allclose', (["cv_results['mean_test_score'][1]", "cv_results['mean_test_score'][2]"], {}), "(cv_results['mean_test_score'][1], cv_results['mean_test_score'][2])\n", (42663, 42731), True, 'import numpy as np\n'), ((42782, 42868), 'numpy.allclose', 'np.allclose', (["cv_results['mean_train_score'][1]", "cv_results['mean_train_score'][2]"], {}), "(cv_results['mean_train_score'][1], cv_results[\n 'mean_train_score'][2])\n", (42793, 42868), True, 'import numpy as np\n'), ((44082, 44118), 'numpy.all', 'np.all', (['(search.cv_results_[key] >= 0)'], {}), '(search.cv_results_[key] >= 0)\n', (44088, 44118), True, 'import numpy as np\n'), ((44138, 44173), 'numpy.all', 'np.all', (['(search.cv_results_[key] < 1)'], {}), '(search.cv_results_[key] < 1)\n', (44144, 44173), True, 'import numpy as np\n'), ((44356, 44391), 
'numpy.all', 'np.all', (['(search.cv_results_[key] < 1)'], {}), '(search.cv_results_[key] < 1)\n', (44362, 44391), True, 'import numpy as np\n'), ((45192, 45227), 'numpy.in1d', 'np.in1d', (['expected_keys', 'result_keys'], {}), '(expected_keys, result_keys)\n', (45199, 45227), True, 'import numpy as np\n'), ((46833, 46881), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['this_scores', 'expected_score'], {}), '(this_scores, expected_score)\n', (46852, 46881), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((50470, 50483), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (50479, 50483), True, 'import numpy as np\n'), ((50565, 50592), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', '{}'], {'cv': '(2)'}), '(clf, {}, cv=2)\n', (50577, 50592), False, 'from sklearn.model_selection import GridSearchCV\n'), ((50736, 50767), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (50745, 50767), True, 'import numpy as np\n'), ((50976, 51035), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['p', "{'classifier__foo_param': [1, 2, 3]}"], {'cv': '(2)'}), "(p, {'classifier__foo_param': [1, 2, 3]}, cv=2)\n", (50988, 51035), False, 'from sklearn.model_selection import GridSearchCV\n'), ((55894, 55907), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (55903, 55907), True, 'import numpy as np\n'), ((55983, 56010), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (55996, 56010), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((56468, 56495), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (56481, 56495), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((56712, 56724), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (56721, 56724), True, 'import numpy as np\n'), ((57235, 57290), 'sklearn.model_selection.tests.common.OneTimeSplitter', 'OneTimeSplitter', ([], {'n_splits': 'n_splits', 'n_samples': 'n_samples'}), '(n_splits=n_splits, n_samples=n_samples)\n', (57250, 57290), False, 'from sklearn.model_selection.tests.common import OneTimeSplitter\n'), ((57532, 57556), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (57537, 57556), False, 'from sklearn.model_selection import KFold\n'), ((58231, 58285), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=n_splits, shuffle=True, random_state=0)\n', (58236, 58285), False, 'from sklearn.model_selection import KFold\n'), ((59616, 59654), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)'}), '(n_splits=n_splits, shuffle=True)\n', (59621, 59654), False, 'from sklearn.model_selection import KFold\n'), ((63688, 63705), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (63693, 63705), False, 'from sklearn.model_selection import KFold\n'), ((68731, 68767), 'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(42)'}), '(random_state=42)\n', (68750, 68767), False, 'from sklearn.datasets import make_classification\n'), ((7155, 7173), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7171, 7173), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((8558, 8569), 
'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (8565, 8569), True, 'import numpy as np\n'), ((8717, 8727), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (8724, 8727), True, 'import numpy as np\n'), ((8734, 8746), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (8742, 8746), True, 'import numpy as np\n'), ((14784, 14872), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', '{}'], {'refit': 'refit', 'scoring': "{'acc': 'accuracy', 'prec': 'precision'}"}), "(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec':\n 'precision'})\n", (14796, 14872), False, 'from sklearn.model_selection import GridSearchCV\n'), ((35695, 35711), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (35703, 35711), True, 'import numpy as np\n'), ((35734, 35750), 'numpy.where', 'np.where', (['(y == 2)'], {}), '(y == 2)\n', (35742, 35750), True, 'import numpy as np\n'), ((43318, 43358), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['est', 'est_parameters'], {'cv': 'cv'}), '(est, est_parameters, cv=cv)\n', (43330, 43358), False, 'from sklearn.model_selection import GridSearchCV\n'), ((46002, 46050), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['correct_score', 'cv_scores[i]'], {}), '(correct_score, cv_scores[i])\n', (46021, 46050), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((46543, 46553), 'sklearn.base.clone', 'clone', (['svc'], {}), '(svc)\n', (46548, 46553), False, 'from sklearn.base import clone\n'), ((47716, 47733), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (47731, 47733), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((49498, 49596), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['correct_score', "grid_search.cv_results_['split%d_test_score' % i][cand_i]"], {}), "(correct_score, grid_search.cv_results_[\n 'split%d_test_score' % i][cand_i])\n", (49517, 49596), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((50189, 50289), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['correct_score', "random_search.cv_results_['split%d_test_score' % i][cand_i]"], {}), "(correct_score, random_search.cv_results_[\n 'split%d_test_score' % i][cand_i])\n", (50208, 50289), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((50867, 50920), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""', 'missing_values': 'np.nan'}), "(strategy='mean', missing_values=np.nan)\n", (50880, 50920), False, 'from sklearn.impute import SimpleImputer\n'), ((57663, 57717), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=n_splits, shuffle=True, random_state=0)\n', (57668, 57717), False, 'from sklearn.model_selection import KFold\n'), ((61082, 61107), 'numpy.asanyarray', 'np.asanyarray', (['results[k]'], {}), '(results[k])\n', (61095, 61107), True, 'import numpy as np\n'), ((61481, 61535), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'param_grid'], {'return_train_score': '(True)'}), '(clf, param_grid, return_train_score=True)\n', (61493, 61535), False, 'from sklearn.model_selection import GridSearchCV\n'), ((63254, 63259), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (63257, 63259), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((67370, 67383), 'scipy.stats.uniform', 'uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (67377, 67383), False, 'from scipy.stats import bernoulli, expon, 
uniform\n'), ((68168, 68186), 'scipy.stats.uniform', 'uniform', (['(0.01)', '(0.1)'], {}), '(0.01, 0.1)\n', (68175, 68186), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((46634, 46644), 'sklearn.base.clone', 'clone', (['svc'], {}), '(svc)\n', (46639, 46644), False, 'from sklearn.base import clone\n'), ((57930, 57984), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=n_splits, shuffle=True, random_state=0)\n', (57935, 57984), False, 'from sklearn.model_selection import KFold\n'), ((61177, 61248), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['exp_results[k]', 'results[k]'], {'err_msg': "('Checking ' + k)"}), "(exp_results[k], results[k], err_msg='Checking ' + k)\n", (61195, 61248), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((61330, 61398), 'sklearn.utils._testing.assert_allclose', 'assert_allclose', (['exp_results[k]', 'results[k]'], {'err_msg': "('Checking ' + k)"}), "(exp_results[k], results[k], err_msg='Checking ' + k)\n", (61345, 61398), False, 'from sklearn.utils._testing import assert_allclose\n'), ((63585, 63590), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (63588, 63590), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((5621, 5660), 'itertools.product', 'product', (["params2['bar']", "params2['foo']"], {}), "(params2['bar'], params2['foo'])\n", (5628, 5660), False, 'from itertools import chain, product\n'), ((45958, 45985), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y[test]', 'dec'], {}), '(y[test], dec)\n', (45971, 45985), False, 'from sklearn.metrics import roc_auc_score\n')] |
# -*- encoding:utf-8 -*-
# @Time : 2021/1/3 15:15
# @Author : gfjiang
import os.path as osp
import mmcv
import numpy as np
import cvtools
import matplotlib.pyplot as plt
import cv2.cv2 as cv
from functools import partial
import torch
import math
from cvtools.utils.path import add_prefix_filename_suffix
from mmdet.ops import nms
from mmdet.apis import init_detector, inference_detector
def draw_features(module, input, output, work_dir='./'):
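    """Forward-hook callback: save a grid of heat maps of one layer's output feature maps.

    ``output`` is the layer's (N, C, H, W) tensor; outputs whose spatial size is below 128
    are skipped. Meant to be registered with ``register_forward_hook``
    (see ``AerialDetectionOBB.vis_feats``).
    """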
x = output.cpu().numpy()
out_channels = list(output.shape)[1]
height = int(math.sqrt(out_channels))
width = height
if list(output.shape)[2] < 128:
return
fig = plt.figure(figsize=(32, 32))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05)
for i in range(height * width):
plt.subplot(height, width, i + 1)
plt.axis('off')
img = x[0, i, :, :]
pmin = np.min(img)
pmax = np.max(img)
        img = ((img - pmin) / (pmax - pmin + 0.000001))*255  # rescale floats in [0, 1] to 0-255
        img = img.astype(np.uint8)  # convert to uint8
        img = cv.applyColorMap(img, cv.COLORMAP_JET)  # generate a heat map
        img = img[:, :, ::-1]  # cv2 uses BGR while matplotlib expects RGB, so reverse the channels
plt.imshow(img)
# print("{}/{}".format(i,width*height))
savename = get_image_name_for_hook(module, work_dir)
fig.savefig(savename, dpi=100)
fig.clf()
plt.close()
def get_image_name_for_hook(module, work_dir='./'):
"""
Generate image filename for hook function
Parameters:
-----------
module: module of neural network
"""
# os.makedirs(work_dir, exist_ok=True)
module_name = str(module)
base_name = module_name.split('(')[0]
index = 0
    image_name = '.'  # '.' surely exists, to make the first loop condition True
while osp.exists(image_name):
index += 1
image_name = osp.join(
work_dir, 'feats', '%s_%d.png' % (base_name, index))
return image_name
class AerialDetectionOBB(object):
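    """Thin wrapper around an mmdetection OBB model: loads the config/checkpoint once,
    then runs inference, optional visualization and result saving on a list of images."""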
def __init__(self, config, pth):
self.imgs = []
self.cfg = mmcv.Config.fromfile(config)
self.pth = pth
print('loading model {} ...'.format(pth))
self.model = init_detector(self.cfg, self.pth, device='cuda:0')
self.results = []
self.img_detected = []
# self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d))
def __call__(self,
imgs_or_path,
det_thrs=0.5,
vis=False,
vis_thr=0.5,
save_root=''):
if isinstance(imgs_or_path, str):
self.imgs += cvtools.get_files_list(imgs_or_path)
else:
self.imgs += imgs_or_path
prog_bar = mmcv.ProgressBar(len(self.imgs))
for _, img in enumerate(self.imgs):
self.detect(img, det_thrs=det_thrs, vis=vis,
vis_thr=vis_thr, save_root=save_root)
prog_bar.update()
def detect(self,
img,
det_thrs=0.5,
vis=False,
vis_thr=0.5,
save_root=''):
result = inference_detector(self.model, img)
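        # result: one array per class; the last column of each row is the confidence score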
# result = self.nms(result)
if isinstance(det_thrs, float):
det_thrs = [det_thrs] * len(result)
if vis:
to_file = osp.join(save_root, osp.basename(img))
to_file = add_prefix_filename_suffix(to_file, suffix='_obb')
self.vis(img, result, vis_thr=vis_thr, to_file=to_file)
result = [det[det[..., -1] > det_thr] for det, det_thr
in zip(result, det_thrs)]
        if all(len(det) == 0 for det in result):
print('detect: image {} has no object.'.format(img))
self.img_detected.append(img)
self.results.append(result)
return result
def nms(self, result, nms_th=0.3):
dets_num = [len(det_cls) for det_cls in result]
result = np.vstack(result)
_, ids = nms(result, nms_th)
total_num = 0
nms_result = []
for num in dets_num:
            # keep the surviving indices that fall inside this class's slice of the stacked dets
            ids_cls = ids[np.where((total_num <= ids) & (ids < total_num + num))[0]]
nms_result.append(result[ids_cls])
total_num += num
return nms_result
def vis(self, img, bbox_result, vis_thr=0.5,
to_file='vis.jpg'):
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
inds = np.where(bboxes[:, -1] > vis_thr)[0]
bboxes = bboxes[inds]
labels = labels[inds]
texts = [self.model.CLASSES[index]+'|'+str(round(bbox[-1], 2))
for index, bbox in zip(labels, bboxes)]
img = cvtools.draw_boxes_texts(
img, bboxes[:, :-1], box_format='polygon', line_width=2)
cvtools.imwrite(img, to_file)
def vis_feats(self, modules_for_plot):
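        """register draw_features as a forward hook on every sub-module of the given types"""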
h, w = self.cfg.data.train.img_scale
for name, module in self.model.named_modules():
if isinstance(module, modules_for_plot):
draw_features_func = partial(
draw_features, work_dir=self.cfg.work_dir)
module.register_forward_hook(draw_features_func)
def save_results(self, save):
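        """write detections to `save`, one line per box: '<image> <class> <comma-joined first four box values>'"""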
str_results = ''
for i, img in enumerate(self.img_detected):
result = self.results[i]
img = osp.basename(img)
for cls_index, dets in enumerate(result):
cls = self.model.CLASSES[cls_index]
for box in dets:
bbox_str = ','.join(map(str, map(int, box[:4])))
str_results += ' '.join([img, cls, bbox_str]) + '\n'
with open(save, 'w') as f:
f.write(str_results)
if __name__ == '__main__':
config_file = 'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2.py'
pth_file = 'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/epoch_12.pth'
detector = AerialDetectionOBB(config_file, pth_file)
detector('/media/data/DOTA/crop/P2701_2926_1597_3949_2620.png', vis=True,
save_root='work_dirs/attention_vis/')
detector.save_results('work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/detect_result.txt')
| [
"math.sqrt",
"cvtools.imwrite",
"mmdet.ops.nms",
"matplotlib.pyplot.imshow",
"os.path.exists",
"cvtools.utils.path.add_prefix_filename_suffix",
"cvtools.draw_boxes_texts",
"numpy.where",
"mmdet.apis.init_detector",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.vstack",
"numpy.concatenate",
"numpy.min",
"matplotlib.pyplot.axis",
"mmdet.apis.inference_detector",
"cvtools.get_files_list",
"cv2.cv2.applyColorMap",
"os.path.join",
"matplotlib.pyplot.figure",
"functools.partial",
"os.path.basename",
"mmcv.Config.fromfile",
"numpy.full",
"matplotlib.pyplot.subplot"
] | [((643, 671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(32, 32)'}), '(figsize=(32, 32))\n', (653, 671), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1410), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1408, 1410), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1837), 'os.path.exists', 'osp.exists', (['image_name'], {}), '(image_name)\n', (1825, 1837), True, 'import os.path as osp\n'), ((538, 561), 'math.sqrt', 'math.sqrt', (['out_channels'], {}), '(out_channels)\n', (547, 561), False, 'import math\n'), ((812, 845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['height', 'width', '(i + 1)'], {}), '(height, width, i + 1)\n', (823, 845), True, 'import matplotlib.pyplot as plt\n'), ((854, 869), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (862, 869), True, 'import matplotlib.pyplot as plt\n'), ((913, 924), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (919, 924), True, 'import numpy as np\n'), ((940, 951), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (946, 951), True, 'import numpy as np\n'), ((1098, 1136), 'cv2.cv2.applyColorMap', 'cv.applyColorMap', (['img', 'cv.COLORMAP_JET'], {}), '(img, cv.COLORMAP_JET)\n', (1114, 1136), True, 'import cv2.cv2 as cv\n'), ((1225, 1240), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1235, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1940), 'os.path.join', 'osp.join', (['work_dir', '"""feats"""', "('%s_%d.png' % (base_name, index))"], {}), "(work_dir, 'feats', '%s_%d.png' % (base_name, index))\n", (1887, 1940), True, 'import os.path as osp\n'), ((2092, 2120), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['config'], {}), '(config)\n', (2112, 2120), False, 'import mmcv\n'), ((2215, 2265), 'mmdet.apis.init_detector', 'init_detector', (['self.cfg', 'self.pth'], {'device': '"""cuda:0"""'}), "(self.cfg, self.pth, device='cuda:0')\n", (2228, 2265), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((3136, 3171), 'mmdet.apis.inference_detector', 'inference_detector', (['self.model', 'img'], {}), '(self.model, img)\n', (3154, 3171), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((3924, 3941), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (3933, 3941), True, 'import numpy as np\n'), ((3959, 3978), 'mmdet.ops.nms', 'nms', (['result', 'nms_th'], {}), '(result, nms_th)\n', (3962, 3978), False, 'from mmdet.ops import nms\n'), ((4328, 4350), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (4337, 4350), True, 'import numpy as np\n'), ((4501, 4523), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (4515, 4523), True, 'import numpy as np\n'), ((4778, 4863), 'cvtools.draw_boxes_texts', 'cvtools.draw_boxes_texts', (['img', 'bboxes[:, :-1]'], {'box_format': '"""polygon"""', 'line_width': '(2)'}), "(img, bboxes[:, :-1], box_format='polygon',\n line_width=2)\n", (4802, 4863), False, 'import cvtools\n'), ((4881, 4910), 'cvtools.imwrite', 'cvtools.imwrite', (['img', 'to_file'], {}), '(img, to_file)\n', (4896, 4910), False, 'import cvtools\n'), ((2630, 2666), 'cvtools.get_files_list', 'cvtools.get_files_list', (['imgs_or_path'], {}), '(imgs_or_path)\n', (2652, 2666), False, 'import cvtools\n'), ((3395, 3445), 'cvtools.utils.path.add_prefix_filename_suffix', 'add_prefix_filename_suffix', (['to_file'], {'suffix': '"""_obb"""'}), "(to_file, suffix='_obb')\n", (3421, 3445), False, 'from cvtools.utils.path import add_prefix_filename_suffix\n'), ((4382, 4423), 
'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (4389, 4423), True, 'import numpy as np\n'), ((4539, 4572), 'numpy.where', 'np.where', (['(bboxes[:, -1] > vis_thr)'], {}), '(bboxes[:, -1] > vis_thr)\n', (4547, 4572), True, 'import numpy as np\n'), ((5454, 5471), 'os.path.basename', 'osp.basename', (['img'], {}), '(img)\n', (5466, 5471), True, 'import os.path as osp\n'), ((3354, 3371), 'os.path.basename', 'osp.basename', (['img'], {}), '(img)\n', (3366, 3371), True, 'import os.path as osp\n'), ((5150, 5200), 'functools.partial', 'partial', (['draw_features'], {'work_dir': 'self.cfg.work_dir'}), '(draw_features, work_dir=self.cfg.work_dir)\n', (5157, 5200), False, 'from functools import partial\n'), ((4080, 4122), 'numpy.where', 'np.where', (['((total_num <= ids) & (ids < num))'], {}), '((total_num <= ids) & (ids < num))\n', (4088, 4122), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
tradingAPI.low_level
~~~~~~~~~~~~~~
    This module provides the low-level functions for interfacing with the service.
"""
import time
import re
from datetime import datetime
from pyvirtualdisplay import Display
from bs4 import BeautifulSoup
from splinter import Browser
from .glob import Glob
from .links import path
from .utils import num, expect, get_pip
# exceptions
from tradingAPI import exceptions
import selenium.common.exceptions
# logging
import logging
logger = logging.getLogger('tradingAPI.low_level')
class Stock(object):
"""base class for stocks"""
def __init__(self, product):
self.product = product
self.market = True
self.records = []
def new_rec(self, rec):
"""add a record"""
self.records.append(rec)
return self.records
class Movement(object):
"""class-storing movement"""
def __init__(self, product, quantity, mode, price):
self.product = product
self.quantity = quantity
self.mode = mode
self.price = price
class PurePosition(object):
"""class-storing position"""
def __init__(self, product, quantity, mode, price):
self.product = product
self.quantity = quantity
self.mode = mode
self.price = price
def __repr__(self):
return ' - '.join([str(self.product), str(self.quantity),
str(self.mode), str(self.price)])
class LowLevelAPI(object):
"""low level api to interface with the service"""
def __init__(self, brow="firefox"):
self.brow_name = brow
self.positions = []
self.movements = []
self.stocks = []
# init globals
Glob()
    def launch(self):
        """launch the browser and the virtual display; call this before any other method"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
            logger.debug(f"visiting {url}")
            self.browser.visit(url)
            logger.debug(f"connected to {url}")
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
                    raise exceptions.CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
def get_bottom_info(self, info):
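        """return one of the equity values shown in the bottom bar
        (accepted keys: free_funds, account_value, live_result, used_margin)"""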
accepted_values = {
'free_funds': 'equity-free',
'account_value': 'equity-total',
'live_result': 'equity-ppl',
'used_margin': 'equity-margin'}
try:
info_label = accepted_values[info]
val = self.css1("div#%s span.equity-item-value" % info_label).text
return num(val)
except KeyError as e:
raise exceptions.BaseExc(e)
def get_price(self, name):
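        """return the current sell price of the product whose name contains `name`, or False"""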
soup = BeautifulSoup(
self.css1("div.scrollable-area-content").html, "html.parser")
for product in soup.select("div.tradebox"):
fullname = product.select("span.instrument-name")[0].text.lower()
if name.lower() in fullname:
mark_closed_list = [x for x in product.select(
"div.quantity-list-input-wrapper") if x.select(
"div.placeholder")[0].text.lower().find("close") != -1]
if mark_closed_list:
sell_price = product.select("div.tradebox-price-sell")[0]\
.text
return float(sell_price)
else:
return False
class MovementWindow(object):
"""add movement window"""
def __init__(self, api, product):
self.api = api
self.product = product
self.state = 'initialized'
self.insfu = False
def open(self, name_counter=None):
"""open the window"""
if self.api.css1(path['add-mov']).visible:
self.api.css1(path['add-mov']).click()
else:
self.api.css1('span.dataTable-no-data-action').click()
logger.debug("opened window")
self.api.css1(path['search-box']).fill(self.product)
if self.get_result(0) is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(self.product)
result, product = self.search_res(self.product, name_counter)
result.click()
if self.api.elCss("div.widget_message"):
self.decode(self.api.css1("div.widget_message"))
self.product = product
self.state = 'open'
def _check_open(self):
if self.state == 'open':
return True
else:
raise exceptions.WindowException()
def close(self):
"""close a movement"""
self._check_open()
self.api.css1(path['close']).click()
self.state = 'closed'
logger.debug("closed window")
def confirm(self):
"""confirm the movement"""
self._check_open()
self.get_price()
self.api.css1(path['confirm-btn']).click()
widg = self.api.css("div.widget_message")
if widg:
self.decode(widg[0])
raise exceptions.WidgetException(widg)
            if all(hasattr(self, x) for x in ['quantity', 'mode']):
self.api.movements.append(Movement(
self.product, self.quantity, self.mode, self.price))
logger.debug("%s movement appended to the list" % self.product)
self.state = 'conclused'
logger.debug("confirmed movement")
def search_res(self, res, check_counter=None):
"""search for a res"""
logger.debug("searching result")
result = self.get_result(0)
name = self.get_research_name(result)
x = 0
while not self.check_name(res, name, counter=check_counter):
name = self.get_research_name(self.get_result(x))
if name is None:
self.api.css1(path['close']).click()
raise exceptions.ProductNotFound(res)
logger.debug(name)
if self.check_name(res, name, counter=check_counter):
                    return self.get_result(x), name
x += 1
logger.debug("found product at position %d" % (x + 1))
return result, name
def check_name(self, name, string, counter=None):
"""if both in string return False"""
name = name.lower()
string = string.lower()
if counter is None:
if name in string:
return True
else:
return False
counter = counter.lower()
if name in string and counter in string:
logger.debug("check_name: counter found in string")
return False
elif name in string and counter not in string:
return True
else:
return False
def get_research_name(self, res):
"""return result name"""
if res is None:
return None
return self.api.css1("span.instrument-name", res).text
def get_result(self, pos):
"""get pos result, where 0 is first"""
evalxpath = path['res'] + f"[{pos + 1}]"
try:
res = self.api.xpath(evalxpath)[0]
return res
except Exception:
return None
def set_limit(self, category, mode, value):
"""set limit in movement window"""
self._check_open()
if (mode not in ["unit", "value"] or category
not in ["gain", "loss", "both"]):
raise ValueError()
if not hasattr(self, 'stop_limit'):
self.stop_limit = {'gain': {}, 'loss': {}}
logger.debug("initialized stop_limit")
if category == 'gain':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
elif category == 'loss':
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
if category != 'both':
self.stop_limit[category]['mode'] = mode
self.stop_limit[category]['value'] = value
elif category == 'both':
self.api.xpath(
path['limit-gain-%s' % mode])[0].fill(str(value))
self.api.xpath(
path['limit-loss-%s' % mode])[0].fill(str(value))
for cat in ['gain', 'loss']:
self.stop_limit[cat]['mode'] = mode
self.stop_limit[cat]['value'] = value
logger.debug("set limit")
def decode(self, message):
"""decode text pop-up"""
title = self.api.css1("div.title", message).text
text = self.api.css1("div.text", message).text
if title == "Insufficient Funds":
self.insfu = True
elif title == "Maximum Quantity Limit":
raise exceptions.MaxQuantLimit(num(text))
elif title == "Minimum Quantity Limit":
raise exceptions.MinQuantLimit(num(text))
logger.debug("decoded message")
def decode_update(self, message, value, mult=0.1):
"""decode and update the value"""
            # read the text first so it is still defined if num() fails below
            msg_text = self.api.css1("div.text", message).text
            try:
                return num(msg_text)
except Exception:
if msg_text.lower().find("higher") != -1:
value += value * mult
return value
else:
self.decode(message)
return None
def get_mov_margin(self):
"""get the margin of the movement"""
self._check_open()
return num(self.api.css1("span.cfd-order-info-item-value").text)
def set_mode(self, mode):
"""set mode (buy or sell)"""
self._check_open()
if mode not in ["buy", "sell"]:
raise ValueError()
self.api.css1(path[mode + '-btn']).click()
self.mode = mode
logger.debug("mode set")
        def get_quantity(self):
            """get the current quantity"""
self._check_open()
quant = int(num(self.api.css1(path['quantity']).value))
self.quantity = quant
return quant
def set_quantity(self, quant):
"""set quantity"""
self._check_open()
self.api.css1(path['quantity']).fill(str(int(quant)))
self.quantity = quant
logger.debug("quantity set")
def get_price(self, mode='buy'):
"""get current price"""
if mode not in ['buy', 'sell']:
raise ValueError()
self._check_open()
price = num(self.api.css1(
"div.orderdialog div.tradebox-price-%s" % mode).text)
self.price = price
return price
def get_unit_value(self):
"""get unit value of stock based on margin, memoized"""
# find in the collection
try:
unit_value = Glob().theCollector.collection['unit_value']
unit_value_res = unit_value[self.product]
logger.debug("unit_value found in the collection")
return unit_value_res
except KeyError:
logger.debug("unit_value not found in the collection")
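            # not cached yet: probe with a quantity of 1/pip units and derive the per-unit value from the margin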
pip = get_pip(mov=self)
quant = 1 / pip
if hasattr(self, 'quantity'):
                old_quant = self.quantity
self.set_quantity(quant)
# update the site
time.sleep(0.5)
margin = self.get_mov_margin()
logger.debug(f"quant: {quant} - pip: {pip} - margin: {margin}")
if 'old_quant' in locals():
self.set_quantity(old_quant)
unit_val = margin / quant
self.unit_value = unit_val
Glob().unit_valueHandler.add_val({self.product: unit_val})
return unit_val
def new_mov(self, name):
"""factory method pattern"""
return self.MovementWindow(self, name)
class Position(PurePosition):
"""position object"""
def __init__(self, api, html_div):
"""initialized from div"""
self.api = api
if isinstance(html_div, type('')):
self.soup_data = BeautifulSoup(html_div, 'html.parser')
else:
self.soup_data = html_div
self.product = self.soup_data.select("td.name")[0].text
self.quantity = num(self.soup_data.select("td.quantity")[0].text)
if ("direction-label-buy" in
self.soup_data.select("td.direction")[0].span['class']):
self.mode = 'buy'
else:
self.mode = 'sell'
self.price = num(self.soup_data.select("td.averagePrice")[0].text)
self.margin = num(self.soup_data.select("td.margin")[0].text)
self.id = self.find_id()
def update(self, soup):
"""update the soup"""
self.soup_data = soup
return soup
def find_id(self):
"""find pos ID with with given data"""
pos_id = self.soup_data['id']
self.id = pos_id
return pos_id
@property
def close_tag(self):
"""obtain close tag"""
return f"#{self.id} div.close-icon"
def close(self):
"""close position via tag"""
self.api.css1(self.close_tag).click()
try:
self.api.xpath(path['ok_but'])[0].click()
except selenium.common.exceptions.ElementNotInteractableException:
if (self.api.css1('.widget_message div.title').text ==
'Market Closed'):
logger.error("market closed, position can't be closed")
raise exceptions.MarketClosed()
raise exceptions.WidgetException(
self.api.css1('.widget_message div.text').text)
# wait until it's been closed
# set a timeout
timeout = time.time() + 10
while self.api.elCss(self.close_tag):
time.sleep(0.1)
if time.time() > timeout:
raise TimeoutError("failed to close pos %s" % self.id)
logger.debug("closed pos %s" % self.id)
def get_gain(self):
"""get current profit"""
gain = num(self.soup_data.select("td.ppl")[0].text)
self.gain = gain
return gain
def bind_mov(self):
"""bind the corresponding movement"""
logger = logging.getLogger("tradingAPI.low_level.bind_mov")
mov_list = [x for x in self.api.movements
if x.product == self.product and
x.quantity == self.quantity and
x.mode == self.mode]
if not mov_list:
logger.debug("fail: mov not found")
return None
else:
logger.debug("success: found movement")
for x in mov_list:
# find approximate price
max_roof = self.price + self.price * 0.01
min_roof = self.price - self.price * 0.01
if min_roof < x.price < max_roof:
logger.debug("success: price corresponding")
# bind mov
self.mov = x
return x
else:
logger.debug("fail: price %f not corresponding to %f" %
(self.price, x.price))
continue
# if nothing, return None
return None
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
| [
"logging.getLogger",
"tradingAPI.exceptions.WindowException",
"tradingAPI.exceptions.MarketClosed",
"splinter.Browser",
"tradingAPI.exceptions.ProductNotFound",
"tradingAPI.exceptions.VBroException",
"time.sleep",
"tradingAPI.exceptions.BrowserException",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"tradingAPI.exceptions.BaseExc",
"time.time",
"tradingAPI.exceptions.WidgetException",
"pyvirtualdisplay.Display"
] | [((485, 526), 'logging.getLogger', 'logging.getLogger', (['"""tradingAPI.low_level"""'], {}), "('tradingAPI.low_level')\n", (502, 526), False, 'import logging\n'), ((1879, 1888), 'pyvirtualdisplay.Display', 'Display', ([], {}), '()\n', (1886, 1888), False, 'from pyvirtualdisplay import Display\n'), ((2083, 2106), 'splinter.Browser', 'Browser', (['self.brow_name'], {}), '(self.brow_name)\n', (2090, 2106), False, 'from splinter import Browser\n'), ((4533, 4546), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4543, 4546), False, 'import time\n'), ((15323, 15338), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (15333, 15338), False, 'import time\n'), ((18436, 18486), 'logging.getLogger', 'logging.getLogger', (['"""tradingAPI.low_level.bind_mov"""'], {}), "('tradingAPI.low_level.bind_mov')\n", (18453, 18486), False, 'import logging\n'), ((2016, 2042), 'tradingAPI.exceptions.VBroException', 'exceptions.VBroException', ([], {}), '()\n', (2040, 2042), False, 'from tradingAPI import exceptions\n'), ((2214, 2277), 'tradingAPI.exceptions.BrowserException', 'exceptions.BrowserException', (['self.brow_name', '"""failed to launch"""'], {}), "(self.brow_name, 'failed to launch')\n", (2241, 2277), False, 'from tradingAPI import exceptions\n'), ((4305, 4316), 'time.time', 'time.time', ([], {}), '()\n', (4314, 4316), False, 'import time\n'), ((5239, 5260), 'tradingAPI.exceptions.BaseExc', 'exceptions.BaseExc', (['e'], {}), '(e)\n', (5257, 5260), False, 'from tradingAPI import exceptions\n'), ((5434, 5492), 'tradingAPI.exceptions.BrowserException', 'exceptions.BrowserException', (['self.brow_name', '"""not started"""'], {}), "(self.brow_name, 'not started')\n", (5461, 5492), False, 'from tradingAPI import exceptions\n'), ((6049, 6070), 'tradingAPI.exceptions.BaseExc', 'exceptions.BaseExc', (['e'], {}), '(e)\n', (6067, 6070), False, 'from tradingAPI import exceptions\n'), ((7576, 7616), 'tradingAPI.exceptions.ProductNotFound', 'exceptions.ProductNotFound', (['self.product'], {}), '(self.product)\n', (7602, 7616), False, 'from tradingAPI import exceptions\n'), ((8040, 8068), 'tradingAPI.exceptions.WindowException', 'exceptions.WindowException', ([], {}), '()\n', (8066, 8068), False, 'from tradingAPI import exceptions\n'), ((8602, 8634), 'tradingAPI.exceptions.WidgetException', 'exceptions.WidgetException', (['widg'], {}), '(widg)\n', (8628, 8634), False, 'from tradingAPI import exceptions\n'), ((16087, 16125), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_div', '"""html.parser"""'], {}), "(html_div, 'html.parser')\n", (16100, 16125), False, 'from bs4 import BeautifulSoup\n'), ((17885, 17896), 'time.time', 'time.time', ([], {}), '()\n', (17894, 17896), False, 'import time\n'), ((17968, 17983), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (17978, 17983), False, 'import time\n'), ((4389, 4400), 'time.time', 'time.time', ([], {}), '()\n', (4398, 4400), False, 'import time\n'), ((4741, 4752), 'time.time', 'time.time', ([], {}), '()\n', (4750, 4752), False, 'import time\n'), ((9496, 9527), 'tradingAPI.exceptions.ProductNotFound', 'exceptions.ProductNotFound', (['res'], {}), '(res)\n', (9522, 9527), False, 'from tradingAPI import exceptions\n'), ((18003, 18014), 'time.time', 'time.time', ([], {}), '()\n', (18012, 18014), False, 'import time\n'), ((4838, 4849), 'time.time', 'time.time', ([], {}), '()\n', (4847, 4849), False, 'import time\n'), ((17645, 17670), 'tradingAPI.exceptions.MarketClosed', 'exceptions.MarketClosed', ([], {}), '()\n', (17668, 17670), False, 'from tradingAPI import 
exceptions\n'), ((4671, 4685), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4683, 4685), False, 'from datetime import datetime\n')] |
import math
import torch
import torch.nn as nn
from models.neural import MultiHeadedAttention, PositionwiseFeedForward
from models.rnn import LayerNormLSTM
class Classifier(nn.Module):
def __init__(self, hidden_size):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask_cls):
h = self.linear1(x).squeeze(-1)
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
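        # Standard sinusoidal positional encoding:
        # pe[pos, 2i] = sin(pos / 10000^(2i/dim)), pe[pos, 2i+1] = cos(pos / 10000^(2i/dim))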
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *
-(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(0)
super(PositionalEncoding, self).__init__()
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb, step=None):
emb = emb * math.sqrt(self.dim)
if (step):
emb = emb + self.pe[:, step][:, None, :]
else:
emb = emb + self.pe[:, :emb.size(1)]
emb = self.dropout(emb)
return emb
def get_emb(self, emb):
return self.pe[:, :emb.size(1)]
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, heads, d_ff, dropout):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, iter, query, inputs, mask):
if (iter != 0):
input_norm = self.layer_norm(inputs)
else:
input_norm = inputs
mask = mask.unsqueeze(1)
context = self.self_attn(input_norm, input_norm, input_norm,
mask=mask)
out = self.dropout(context) + inputs
return self.feed_forward(out)
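# A minimal usage sketch for the layer above (illustrative only; the shapes and
# hyper-parameters below are assumptions, not values taken from any training config):
#   layer = TransformerEncoderLayer(d_model=768, heads=8, d_ff=2048, dropout=0.1)
#   x = torch.zeros(2, 10, 768)                      # 2 documents, 10 sentence vectors each
#   pad_mask = torch.zeros(2, 10, dtype=torch.bool)  # assumed: True marks padded positions
#   out = layer(0, x, x, pad_mask)                   # iter == 0 skips the input LayerNorm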
class TransformerInterEncoder(nn.Module):
def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0):
super(TransformerInterEncoder, self).__init__()
self.d_model = d_model
self.num_inter_layers = num_inter_layers
self.pos_emb = PositionalEncoding(dropout, d_model)
self.transformer_inter = nn.ModuleList(
[TransformerEncoderLayer(d_model, heads, d_ff, dropout)
for _ in range(num_inter_layers)])
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.wo = nn.Linear(d_model, 1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, top_vecs, mask):
""" See :obj:`EncoderBase.forward()`"""
batch_size, n_sents = top_vecs.size(0), top_vecs.size(1)
pos_emb = self.pos_emb.pe[:, :n_sents]
x = top_vecs * mask[:, :, None].float()
x = x + pos_emb
for i in range(self.num_inter_layers):
x = self.transformer_inter[i](i, x, x, ~mask) # all_sents * max_tokens * dim
x = self.layer_norm(x)
sent_scores = self.sigmoid(self.wo(x))
sent_scores = sent_scores.squeeze(-1) * mask.float()
return sent_scores
class GRUEncoder_attn(nn.Module):
def __init__(self,bidirectional, num_layers, input_size, hidden_size,dropout=0.0):
super(GRUEncoder_attn,self).__init__()
class RNNEncoder_attn(nn.Module):
def __init__(self, bidirectional, num_layers, input_size,
hidden_size, dropout=0.0):
super(RNNEncoder_attn, self).__init__()
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.relu = nn.ReLU()
self.rnn = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidirectional)
self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True)
self.dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax()
print('this is dropout',dropout)
def forward(self, x, mask):
"""See :func:`EncoderBase.forward()`"""
batch, layer, seq, hidden = x.size()
x1=x.contiguous().view(batch * layer, -1, hidden)
x1 = torch.transpose(x1, 1, 0)
memory_bank, _ = self.rnn(x1)
memory_bank = self.dropout(memory_bank) + x1
memory_bank = torch.transpose(memory_bank, 1, 0)
# sent_scores = self.softmax(self.relu(self.wo(memory_bank)).squeeze(dim=-1)).unsqueeze(-1)
sent_scores = self.softmax(self.relu(self.wo(memory_bank[:,-1,:])).squeeze(dim=-1).view(-1,layer)).unsqueeze(-1)
x=x.transpose(1,2)
sent_vec = torch.matmul(sent_scores.transpose(1,2).unsqueeze(dim = 1).expand(batch,seq,1,layer),x)
return sent_vec.squeeze(dim = 2)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, heads, d_ff, dropout):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, iter, ent_enc, inputs, self_attn_mask=None,context_attn_mask=None):
context = self.self_attn(inputs, inputs, inputs,
mask=self_attn_mask)
dec_output = self.self_attn(
ent_enc, ent_enc, context, mask=context_attn_mask)
dec_output = self.feed_forward(dec_output)
return dec_output
class TransformerInterDecoder(nn.Module):
def __init__(self, d_model, d_ff, heads, dropout, d_hidden, num_inter_layers=0):
super(TransformerInterDecoder, self).__init__()
self.d_model = d_model
self.num_inter_layers = num_inter_layers
self.pos_emb = PositionalEncoding(dropout, d_model)
self.transformer_inter = nn.ModuleList(
[TransformerDecoderLayer(d_model, heads, d_ff, dropout)
for _ in range(num_inter_layers)])
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.wo = nn.Linear(d_model, d_hidden , bias=True)
self.wi = nn.Linear(d_model, d_hidden, bias=True)
self.v = nn.Linear(d_hidden, 1, bias=True)
self.LR = nn.LeakyReLU()
self.softmax = nn.Softmax(dim=-1)
def forward(self, top_vecs, inputs, mask, label_mask=None):
""" See :obj:`EncoderBase.forward()`"""
n_out = inputs.size(1)
pos_emb = self.pos_emb.pe[:, :n_out]
seq_mask=subsequent_mask(inputs)
self_attn_mask = torch.gt((~label_mask.unsqueeze(1).expand(-1, n_out, -1) + seq_mask), 0)
inputs=inputs+pos_emb
for i in range(self.num_inter_layers):
inputs = self.transformer_inter[i](i, top_vecs, inputs,self_attn_mask,~ mask.unsqueeze(1).expand(-1, n_out,-1))
scores=self.v(self.LR(
self.wo(inputs.unsqueeze(2)).expand(-1, -1, top_vecs.size(1), -1) + self.wi(top_vecs).unsqueeze(
1))).squeeze(-1)
sent_scores = self.softmax(scores)
return sent_scores
class RNNEncoder(nn.Module):
def __init__(self, bidirectional, num_layers, input_size,
hidden_size, dropout=0.0):
super(RNNEncoder, self).__init__()
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.rnn = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidirectional)
self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True)
self.dropout = nn.Dropout(dropout)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask):
"""See :func:`EncoderBase.forward()`"""
x = torch.transpose(x, 1, 0)
memory_bank, _ = self.rnn(x)
memory_bank = self.dropout(memory_bank) + x
memory_bank = torch.transpose(memory_bank, 1, 0)
sent_scores = self.sigmoid(self.wo(memory_bank))
sent_scores = sent_scores.squeeze(-1) * mask.float()
return sent_scores
class GCN(nn.Module):
def __init__(self,in_channel,out_channel,hidden_dim,drop):
super(GCN, self).__init__()
self.in_channel=in_channel
self.out_channel=out_channel
self.hidden_dim=hidden_dim
self.dropout = nn.Dropout(p=drop)
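        # NOTE: GCNConv is not imported in this fragment; judging by the call signature it is
        # presumably torch_geometric.nn.GCNConv (an assumption, not confirmed by this file).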
self.gcn_x_11=GCNConv(self.in_channel,self.hidden_dim)
self.gcn_x_12=GCNConv(self.hidden_dim,self.out_channel)#No.1-*2*2
# self.gcn_x_21=GCNConv(self.in_channel,self.hidden_dim)
# self.gcn_x_22=GCNConv(self.hidden_dim,self.out_channel)#No.2-*2
# self.gcn_mix=GCNConv(self.hidden_dim*2,self.hidden_dim)#No.2-*2
self.relu=nn.ReLU(inplace=True)
def forward(self, x_1, edge_index_1, edge_index_2=None,edge_weight_1=None,edge_weight_2=None):
syn=self.gcn_x_11(x_1, edge_index_1, edge_weight_1)
syn=self.relu(syn)
syn=self.dropout(syn)
syn = self.gcn_x_12(syn, edge_index_1, edge_weight_1)
syn = self.relu(syn)
syn = self.dropout(syn)
# x2 = self.gcn_x_21(x_1, edge_index_2, edge_weight_2)
# x2 = self.relu(x2)
# x2 = self.dropout(x2)
# mix = self.gcn_mix(torch.cat((syn,x2),-1), edge_index_2, edge_weight_2)
# x2 = self.gcn_x_22(mix, edge_index_2, edge_weight_2)
# syn=self.gcn_x_12(mix, edge_index_1, edge_weight_1)
# syn=self.relu(syn)
# syn=self.dropout(syn)
# x2 = self.relu(x2)
# x2 = self.dropout(x2)
return syn
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.LeakyReLU",
"models.neural.MultiHeadedAttention",
"torch.nn.LayerNorm",
"math.sqrt",
"models.neural.PositionwiseFeedForward",
"torch.transpose",
"math.log",
"models.rnn.LayerNormLSTM",
"torch.nn.Linear",
"torch.zeros",
"torch.arange"
] | [((292, 317), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (301, 317), True, 'import torch.nn as nn\n'), ((341, 353), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (351, 353), True, 'import torch.nn as nn\n'), ((620, 645), 'torch.zeros', 'torch.zeros', (['max_len', 'dim'], {}), '(max_len, dim)\n', (631, 645), False, 'import torch\n'), ((1101, 1122), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (1111, 1122), True, 'import torch.nn as nn\n'), ((1663, 1716), 'models.neural.MultiHeadedAttention', 'MultiHeadedAttention', (['heads', 'd_model'], {'dropout': 'dropout'}), '(heads, d_model, dropout=dropout)\n', (1683, 1716), False, 'from models.neural import MultiHeadedAttention, PositionwiseFeedForward\n'), ((1758, 1805), 'models.neural.PositionwiseFeedForward', 'PositionwiseFeedForward', (['d_model', 'd_ff', 'dropout'], {}), '(d_model, d_ff, dropout)\n', (1781, 1805), False, 'from models.neural import MultiHeadedAttention, PositionwiseFeedForward\n'), ((1832, 1864), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {'eps': '(1e-06)'}), '(d_model, eps=1e-06)\n', (1844, 1864), True, 'import torch.nn as nn\n'), ((1887, 1906), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1897, 1906), True, 'import torch.nn as nn\n'), ((2809, 2828), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2819, 2828), True, 'import torch.nn as nn\n'), ((2855, 2887), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {'eps': '(1e-06)'}), '(d_model, eps=1e-06)\n', (2867, 2887), True, 'import torch.nn as nn\n'), ((2905, 2937), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(1)'], {'bias': '(True)'}), '(d_model, 1, bias=True)\n', (2914, 2937), True, 'import torch.nn as nn\n'), ((2961, 2973), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2971, 2973), True, 'import torch.nn as nn\n'), ((4086, 4095), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4093, 4095), True, 'import torch.nn as nn\n'), ((4116, 4234), 'models.rnn.LayerNormLSTM', 'LayerNormLSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'bidirectional': 'bidirectional'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, bidirectional=bidirectional)\n', (4129, 4234), False, 'from models.rnn import LayerNormLSTM\n'), ((4298, 4351), 'torch.nn.Linear', 'nn.Linear', (['(num_directions * hidden_size)', '(1)'], {'bias': '(True)'}), '(num_directions * hidden_size, 1, bias=True)\n', (4307, 4351), True, 'import torch.nn as nn\n'), ((4376, 4395), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (4386, 4395), True, 'import torch.nn as nn\n'), ((4419, 4431), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (4429, 4431), True, 'import torch.nn as nn\n'), ((4670, 4695), 'torch.transpose', 'torch.transpose', (['x1', '(1)', '(0)'], {}), '(x1, 1, 0)\n', (4685, 4695), False, 'import torch\n'), ((4809, 4843), 'torch.transpose', 'torch.transpose', (['memory_bank', '(1)', '(0)'], {}), '(memory_bank, 1, 0)\n', (4824, 4843), False, 'import torch\n'), ((5422, 5475), 'models.neural.MultiHeadedAttention', 'MultiHeadedAttention', (['heads', 'd_model'], {'dropout': 'dropout'}), '(heads, d_model, dropout=dropout)\n', (5442, 5475), False, 'from models.neural import MultiHeadedAttention, PositionwiseFeedForward\n'), ((5517, 5564), 'models.neural.PositionwiseFeedForward', 'PositionwiseFeedForward', (['d_model', 'd_ff', 'dropout'], {}), '(d_model, d_ff, dropout)\n', (5540, 5564), 
False, 'from models.neural import MultiHeadedAttention, PositionwiseFeedForward\n'), ((5591, 5623), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {'eps': '(1e-06)'}), '(d_model, eps=1e-06)\n', (5603, 5623), True, 'import torch.nn as nn\n'), ((6513, 6532), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (6523, 6532), True, 'import torch.nn as nn\n'), ((6559, 6591), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {'eps': '(1e-06)'}), '(d_model, eps=1e-06)\n', (6571, 6591), True, 'import torch.nn as nn\n'), ((6609, 6648), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_hidden'], {'bias': '(True)'}), '(d_model, d_hidden, bias=True)\n', (6618, 6648), True, 'import torch.nn as nn\n'), ((6668, 6707), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_hidden'], {'bias': '(True)'}), '(d_model, d_hidden, bias=True)\n', (6677, 6707), True, 'import torch.nn as nn\n'), ((6725, 6758), 'torch.nn.Linear', 'nn.Linear', (['d_hidden', '(1)'], {'bias': '(True)'}), '(d_hidden, 1, bias=True)\n', (6734, 6758), True, 'import torch.nn as nn\n'), ((6777, 6791), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (6789, 6791), True, 'import torch.nn as nn\n'), ((6815, 6833), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (6825, 6833), True, 'import torch.nn as nn\n'), ((7959, 8077), 'models.rnn.LayerNormLSTM', 'LayerNormLSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'bidirectional': 'bidirectional'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, bidirectional=bidirectional)\n', (7972, 8077), False, 'from models.rnn import LayerNormLSTM\n'), ((8141, 8194), 'torch.nn.Linear', 'nn.Linear', (['(num_directions * hidden_size)', '(1)'], {'bias': '(True)'}), '(num_directions * hidden_size, 1, bias=True)\n', (8150, 8194), True, 'import torch.nn as nn\n'), ((8218, 8237), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (8228, 8237), True, 'import torch.nn as nn\n'), ((8261, 8273), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (8271, 8273), True, 'import torch.nn as nn\n'), ((8367, 8391), 'torch.transpose', 'torch.transpose', (['x', '(1)', '(0)'], {}), '(x, 1, 0)\n', (8382, 8391), False, 'import torch\n'), ((8503, 8537), 'torch.transpose', 'torch.transpose', (['memory_bank', '(1)', '(0)'], {}), '(memory_bank, 1, 0)\n', (8518, 8537), False, 'import torch\n'), ((8935, 8953), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop'}), '(p=drop)\n', (8945, 8953), True, 'import torch.nn as nn\n'), ((9322, 9343), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9329, 9343), True, 'import torch.nn as nn\n'), ((1206, 1225), 'math.sqrt', 'math.sqrt', (['self.dim'], {}), '(self.dim)\n', (1215, 1225), False, 'import math\n'), ((665, 689), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (677, 689), False, 'import torch\n'), ((733, 775), 'torch.arange', 'torch.arange', (['(0)', 'dim', '(2)'], {'dtype': 'torch.float'}), '(0, dim, 2, dtype=torch.float)\n', (745, 775), False, 'import torch\n'), ((810, 827), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (818, 827), False, 'import math\n')] |
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import DropboxOAuth2Provider
class DropboxOAuth2Adapter(OAuth2Adapter):
provider_id = DropboxOAuth2Provider.id
access_token_url = "https://api.dropbox.com/oauth2/token"
authorize_url = "https://www.dropbox.com/oauth2/authorize"
profile_url = "https://api.dropbox.com/2/users/get_current_account"
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
response = requests.post(
self.profile_url,
headers={"Authorization": "Bearer %s" % (token.token,)},
)
response.raise_for_status()
return self.get_provider().sociallogin_from_response(request, response.json())
oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
| [
"allauth.socialaccount.providers.oauth2.views.OAuth2CallbackView.adapter_view",
"requests.post",
"allauth.socialaccount.providers.oauth2.views.OAuth2LoginView.adapter_view"
] | [((852, 902), 'allauth.socialaccount.providers.oauth2.views.OAuth2LoginView.adapter_view', 'OAuth2LoginView.adapter_view', (['DropboxOAuth2Adapter'], {}), '(DropboxOAuth2Adapter)\n', (880, 902), False, 'from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView\n'), ((920, 973), 'allauth.socialaccount.providers.oauth2.views.OAuth2CallbackView.adapter_view', 'OAuth2CallbackView.adapter_view', (['DropboxOAuth2Adapter'], {}), '(DropboxOAuth2Adapter)\n', (951, 973), False, 'from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView\n'), ((589, 682), 'requests.post', 'requests.post', (['self.profile_url'], {'headers': "{'Authorization': 'Bearer %s' % (token.token,)}"}), "(self.profile_url, headers={'Authorization': 'Bearer %s' % (\n token.token,)})\n", (602, 682), False, 'import requests\n')] |
# SPDX-License-Identifier: MIT
# pylint: disable=redefined-builtin,invalid-name
"""See https://www.sphinx-doc.org/en/master/usage/configuration.html"""
from typing import Sequence
import os
import sys
# region Path setup
sys.path.insert(0, os.path.abspath('..'))
# endregion
# region Project information
project = 'Upkeep'
copyright = '2020, <NAME>'
author = '<NAME>'
# The short X.Y version
version = '1.2.7'
# The full version, including alpha/beta/rc tags
release = f'v{version}'
# endregion
# region General configuration
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: Sequence[str] = []
master_doc = 'index'
# endregion
# region Options for HTML output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# endregion
# region Extension configuration
# endregion
| [
"os.path.abspath"
] | [((241, 262), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (256, 262), False, 'import os\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import unittest
from aenum import Enum, auto
from dace import registry
@registry.make_registry
class ExtensibleClass(object):
pass
class Extension(ExtensibleClass):
pass
@registry.extensible_enum
class ExtensibleEnumeration(Enum):
a = auto()
b = auto()
class RegistryTests(unittest.TestCase):
def test_class_registry(self):
ExtensibleClass.register(Extension)
self.assertTrue(Extension in ExtensibleClass.extensions())
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister(self):
@registry.autoregister
class Extension2(ExtensibleClass):
pass
self.assertTrue(Extension2 in ExtensibleClass.extensions())
def test_class_registry_args(self):
ExtensibleClass.register(Extension, a=True, b=1, c=2)
self.assertTrue(Extension in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension], dict(a=True, b=1, c=2))
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister_args(self):
@registry.autoregister_params(a=False, b=0)
class Extension3(ExtensibleClass):
pass
self.assertTrue(Extension3 in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension3], dict(a=False, b=0))
def test_autoregister_fail(self):
with self.assertRaises(TypeError):
@registry.autoregister
class Extension4(object):
pass
def test_enum_registry(self):
ExtensibleEnumeration.register('c')
self.assertTrue(ExtensibleEnumeration.c in ExtensibleEnumeration)
self.assertEqual(ExtensibleEnumeration.c.value, 3)
def test_enum_registry_fail(self):
with self.assertRaises(TypeError):
@registry.extensible_enum
class NotAnEnum(object):
pass
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"dace.registry.autoregister_params",
"aenum.auto"
] | [((329, 335), 'aenum.auto', 'auto', ([], {}), '()\n', (333, 335), False, 'from aenum import Enum, auto\n'), ((344, 350), 'aenum.auto', 'auto', ([], {}), '()\n', (348, 350), False, 'from aenum import Enum, auto\n'), ((2136, 2151), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2149, 2151), False, 'import unittest\n'), ((1275, 1317), 'dace.registry.autoregister_params', 'registry.autoregister_params', ([], {'a': '(False)', 'b': '(0)'}), '(a=False, b=0)\n', (1303, 1317), False, 'from dace import registry\n')] |
import asyncio
import logging
from json import loads
import pytest
from aiologstash2 import create_tcp_handler
logging.getLogger().setLevel(logging.DEBUG)
class FakeTcpServer:
def __init__(self):
self.data = bytearray()
self.server = None
self.futs = set()
async def start(self):
self.server = await asyncio.start_server(self.on_connect, host="127.0.0.1")
@property
def port(self):
return self.server.sockets[0].getsockname()[1]
@property
def jsons(self):
s = self.data.decode("utf8")
return [loads(i) for i in s.split("\n") if i]
async def close(self):
if self.server is None:
return
self.server.close()
await self.server.wait_closed()
self.server = None
async def on_connect(self, reader, writer):
while True:
data = await reader.read(1024)
if not data:
break
self.data.extend(data)
for fut in self.futs:
if not fut.done():
fut.set_result(None)
async def wait(self):
fut = asyncio.get_event_loop().create_future()
self.futs.add(fut)
await fut
self.futs.remove(fut)
@pytest.fixture
async def make_tcp_server():
servers = []
async def go():
server = FakeTcpServer()
await server.start()
servers.append(server)
return server
yield go
async def finalize():
for server in servers:
await server.close()
await finalize()
@pytest.fixture
async def make_tcp_handler(make_tcp_server):
handlers = []
async def go(*args, level=logging.DEBUG, **kwargs):
server = await make_tcp_server()
handler = await create_tcp_handler("127.0.0.1", server.port, **kwargs)
handlers.append(handler)
return handler, server
yield go
async def finalize():
for handler in handlers:
handler.close()
await handler.wait_closed()
await finalize()
@pytest.fixture
async def setup_logger(make_tcp_handler):
async def go(*args, **kwargs):
handler, server = await make_tcp_handler(*args, **kwargs)
logger = logging.getLogger("aiologstash_test")
logger.addHandler(handler)
return logger, handler, server
yield go
| [
"logging.getLogger",
"json.loads",
"aiologstash2.create_tcp_handler",
"asyncio.start_server",
"asyncio.get_event_loop"
] | [((115, 134), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (132, 134), False, 'import logging\n'), ((2246, 2283), 'logging.getLogger', 'logging.getLogger', (['"""aiologstash_test"""'], {}), "('aiologstash_test')\n", (2263, 2283), False, 'import logging\n'), ((347, 402), 'asyncio.start_server', 'asyncio.start_server', (['self.on_connect'], {'host': '"""127.0.0.1"""'}), "(self.on_connect, host='127.0.0.1')\n", (367, 402), False, 'import asyncio\n'), ((582, 590), 'json.loads', 'loads', (['i'], {}), '(i)\n', (587, 590), False, 'from json import loads\n'), ((1785, 1839), 'aiologstash2.create_tcp_handler', 'create_tcp_handler', (['"""127.0.0.1"""', 'server.port'], {}), "('127.0.0.1', server.port, **kwargs)\n", (1803, 1839), False, 'from aiologstash2 import create_tcp_handler\n'), ((1139, 1163), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1161, 1163), False, 'import asyncio\n')] |
"""TcEx Framework Playbook module"""
# standard library
import base64
import json
import re
from collections import OrderedDict
from collections.abc import Iterable
class PlaybooksBase:
"""TcEx Playbook Module Base Class
Args:
tcex (TcEx): Instance of TcEx class.
context (str): The Redis context (hash).
output_variables (list): The requested output variables.
"""
def __init__(self, tcex, context, output_variables):
"""Initialize the Class properties."""
self.tcex = tcex
self._context = context
self._output_variables = output_variables or []
# properties
self._output_variables_by_name = None
self._output_variables_by_type = None
self.log = tcex.log
# match full variable
self._variable_match = re.compile(fr'^{self._variable_pattern}$')
# capture variable parts (exactly a variable)
self._variable_parse = re.compile(self._variable_pattern)
# match embedded variables without quotes (#App:7979:variable_name!StringArray)
self._vars_keyvalue_embedded = re.compile(fr'(?:\"\:\s?)[^\"]?{self._variable_pattern}')
def _coerce_string_value(self, value):
"""Return a string value from an bool or int."""
# coerce bool before int as python says a bool is an int
if isinstance(value, bool):
# coerce bool to str type
self.log.warning(f'Coercing bool value ({value}) to a string ("{str(value).lower()}").')
value = str(value).lower()
# coerce int to str type
if isinstance(value, (float, int)):
self.log.warning(f'Coercing float/int value ({value}) to a string ("{str(value)}").')
value = str(value)
return value
def _create(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
if variable_type == 'Binary':
# if not isinstance(value, bytes):
# value = value.encode('utf-8')
if validate and not isinstance(value, bytes):
raise RuntimeError('Invalid data provided for Binary.')
value = base64.b64encode(value).decode('utf-8')
elif variable_type == 'KeyValue':
if validate and (not isinstance(value, dict) or not self._is_key_value(value)):
raise RuntimeError('Invalid data provided for KeyValue.')
elif variable_type == 'String':
# coerce string values
value = self._coerce_string_value(value)
if validate and not isinstance(value, str):
raise RuntimeError('Invalid data provided for String.')
elif variable_type == 'TCEntity':
if validate and (not isinstance(value, dict) or not self._is_tc_entity(value)):
raise RuntimeError('Invalid data provided for TcEntity.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
def _create_array(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
# Enhanced entity array is the wild-wild west, don't validate it
if variable_type != 'TCEnhancedEntityArray':
if validate and (not isinstance(value, Iterable) or isinstance(value, (str, dict))):
raise RuntimeError(f'Invalid data provided for {variable_type}.')
value = [
*value
] # spread the value so that we know it's a list (as opposed to an iterable)
if variable_type == 'BinaryArray':
value_encoded = []
for v in value:
if v is not None:
if validate and not isinstance(v, bytes):
raise RuntimeError('Invalid data provided for Binary.')
# if not isinstance(v, bytes):
# v = v.encode('utf-8')
v = base64.b64encode(v).decode('utf-8')
value_encoded.append(v)
value = value_encoded
elif variable_type == 'KeyValueArray':
if validate and not self._is_key_value_array(value):
raise RuntimeError('Invalid data provided for KeyValueArray.')
elif variable_type == 'StringArray':
value_coerced = []
for v in value:
# coerce string values
v = self._coerce_string_value(v)
if validate and not isinstance(v, (type(None), str)):
raise RuntimeError('Invalid data provided for StringArray.')
value_coerced.append(v)
value = value_coerced
elif variable_type == 'TCEntityArray':
if validate and not self._is_tc_entity_array(value):
raise RuntimeError('Invalid data provided for TcEntityArray.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
@staticmethod
def _decode_binary(data):
"""Return decoded bytes data handling data written by java apps."""
try:
data = data.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
# for data written an upstream java App
data = data.decode('latin-1')
return data
@staticmethod
def _is_key_value(data):
"""Return True if provided data has proper structure for Key Value."""
if data is None:
return False
return all(x in data for x in ['key', 'value'])
def _is_key_value_array(self, data):
"""Return True if provided data has proper structure for Key Value Array."""
for d in data:
if not self._is_key_value(d):
return False
return True
@staticmethod
def _is_tc_entity(data):
"""Return True if provided data has proper structure for TC Entity."""
if data is None:
return False
return all(x in data for x in ['id', 'value', 'type'])
def _is_tc_entity_array(self, data):
"""Return True if provided data has proper structure for TC Entity Array."""
for d in data:
if not self._is_tc_entity(d):
return False
return True
@staticmethod
def _load_value(value):
"""Return the loaded JSON value or raise an error.
Args:
value (str): The data from key/value store.
Raises:
RuntimeError: Raise error when data can't be loaded as JSON data.
Returns:
any: The de-serialized value from the key/value store.
"""
try:
return json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to JSON load data "{value}" ({e}).')
def _parse_output_variables(self):
"""Parse the output variables provided to Playbook Class.
**Example Variable Format**::
['#App:1234:status!String', '#App:1234:status_code!String']
"""
self._output_variables_by_name = {}
self._output_variables_by_type = {}
for ov in self._output_variables:
# parse the variable to get individual parts
parsed_variable = self.parse_variable(ov)
variable_name = parsed_variable.get('name')
variable_type = parsed_variable.get('type')
# store the variables in dict by name (e.g. "status_code")
self._output_variables_by_name[variable_name] = {'variable': ov}
# store the variables in dict by name-type (e.g. "status_code-String")
self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}
def _read(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None:
self.log.warning('The key is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'Binary':
value = self._load_value(value)
if b64decode:
value = base64.b64decode(value)
if decode:
value = self._decode_binary(value)
elif variable_type == 'KeyValue':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
value = self._load_value(value)
elif variable_type == 'String':
if embedded:
value = self._read_embedded(value)
# coerce string values
value = self._coerce_string_value(self._load_value(value))
elif variable_type == 'TCEntity':
value = self._load_value(value)
return value
def _read_array(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None: # pragma: no cover
self.log.warning('The null value for key was provided.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'BinaryArray':
value = json.loads(value, object_pairs_hook=OrderedDict)
values = []
for v in value:
if v is not None and b64decode:
v = base64.b64decode(v)
if decode:
v = self._decode_binary(v)
values.append(v)
value = values
elif variable_type == 'KeyValueArray':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
try:
value = json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed loading JSON data ({value}). Error: ({e})')
elif variable_type == 'StringArray':
if embedded:
value = self._read_embedded(value)
# convert int to str
value_coerced = []
for v in self._load_value(value):
# coerce string values
value_coerced.append(self._coerce_string_value(v))
value = value_coerced
elif variable_type in ['TCEntityArray', 'TCEnhancedEntity', 'TCEnhancedEntityArray']:
value = self._load_value(value)
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
return value
def _read_embedded(self, value):
"""Read method for "embedded" variables.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables.
Embedded variable rules:
* Only user input can have embedded variables.
* Only String and KeyValueArray variables can have embedded variables.
* Variables can only be embedded one level deep.
        This method will automatically convert variables embedded in a string with value retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
        Example 1:
            Input: "This input has an embedded #App:7979:variable_name!String"
        Example 2:
            Input: ["one", #App:7979:two!String, "three"]
        Example 3:
            Input: [{
                "key": "embedded string",
                "value": "This input has an embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
value (str): The value to parsed and updated from the DB.
Returns:
(str): Results retrieved from DB
"""
if value is None: # pragma: no cover
return value
for variable in (v.group(0) for v in re.finditer(self._variable_parse, str(value))):
v = self.read(variable)
self.log.trace(f'embedded variable: {variable}, value: {v}')
if isinstance(v, (dict, list)):
v = json.dumps(v)
# for KeyValueArray with nested dict/list type replace the
# quoted value to ensure the resulting data is loadable JSON
value = re.sub(f'"{variable}"', v, value)
if v is not None:
# only replace variable if a non-null value is returned from kv store
# APP-1030 need to revisit this to handle variable references in kv/kvarrays that
# are None. Would like to be able to say if value is just the variable reference,
# sub None value, else insert '' in string. That would require a kv-specific
# version of this method that gets the entire list/dict instead of just the string.
value = re.sub(variable, v, value)
return value
@property
def _variable_pattern(self):
"""Regex pattern to match and parse a playbook variable."""
variable_pattern = r'#([A-Za-z]+)' # match literal (#App,#Trigger) at beginning of String
variable_pattern += r':([\d]+)' # app id (:7979)
variable_pattern += r':([A-Za-z0-9_\.\-\[\]]+)' # variable name (:variable_name)
variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array)
variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array)
variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type
variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom
variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom
variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom)
return variable_pattern
@property
def _variable_array_types(self):
"""Return list of standard playbook array variable types."""
return [
'BinaryArray',
'KeyValueArray',
'StringArray',
'TCEntityArray',
'TCEnhancedEntityArray',
]
@property
def _variable_single_types(self):
"""Return list of standard playbook single variable types."""
return [
'Binary',
'KeyValue',
'String',
'TCEntity',
'TCEnhancedEntity',
]
@property
def _variable_types(self):
"""Return list of standard playbook variable typesd."""
return self._variable_single_types + self._variable_array_types
def _wrap_embedded_keyvalue(self, data):
"""Wrap keyvalue embedded variable in double quotes.
Args:
data (str): The data with embedded variables.
Returns:
(str): Results retrieved from DB
"""
# TODO: need to verify if core still sends improper JSON for KeyValueArrays
if data is not None: # pragma: no cover
variables = []
for v in re.finditer(self._vars_keyvalue_embedded, data):
variables.append(v.group(0))
for var in set(variables): # recursion over set to handle duplicates
# pull (#App:1441:embedded_string!String) from (": #App:1441:embedded_string!String)
variable_string = re.search(self._variable_parse, var).group(0)
# reformat to replace the correct instance only, handling the case where a variable
# is embedded multiple times in the same key value array.
data = data.replace(var, f'": "{variable_string}"')
return data
def create_raw(self, key, value):
"""Create method of CRUD operation for raw data.
        .. important:: Raw data can only be a byte, str or int. Other data structures
(dict, list, etc) must be serialized.
Args:
key (str): The variable to write to the DB.
value (bytes|int|string): The data to write to the DB.
Returns:
(str): Result of DB write.
"""
data = None
if key is not None and value is not None:
try:
data = self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
else:
self.log.warning('The key or value field was None.')
return data
def read_raw(self, key):
"""Read method of CRUD operation for raw data.
        .. important:: Bytes input will be returned as a string as there is
no way to determine data from redis originated as bytes or string.
Args:
key (str): The variable to read from the DB.
Returns:
(str): Results retrieved from DB.
"""
value = None
if key is not None:
value = self.tcex.key_value_store.read(self._context, key.strip())
else:
self.log.warning('The key field was None.')
return value
def parse_variable(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def read(self, key, array=False, embedded=True): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def variable_type(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
| [
"json.loads",
"re.compile",
"base64.b64encode",
"json.dumps",
"base64.b64decode",
"re.finditer",
"re.sub",
"re.search"
] | [((827, 868), 're.compile', 're.compile', (['f"""^{self._variable_pattern}$"""'], {}), "(f'^{self._variable_pattern}$')\n", (837, 868), False, 'import re\n'), ((955, 989), 're.compile', 're.compile', (['self._variable_pattern'], {}), '(self._variable_pattern)\n', (965, 989), False, 'import re\n'), ((1117, 1177), 're.compile', 're.compile', (['f"""(?:\\\\"\\\\:\\\\s?)[^\\\\"]?{self._variable_pattern}"""'], {}), '(f\'(?:\\\\"\\\\:\\\\s?)[^\\\\"]?{self._variable_pattern}\')\n', (1127, 1177), False, 'import re\n'), ((3241, 3258), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (3251, 3258), False, 'import json\n'), ((5784, 5801), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (5794, 5801), False, 'import json\n'), ((7810, 7858), 'json.loads', 'json.loads', (['value'], {'object_pairs_hook': 'OrderedDict'}), '(value, object_pairs_hook=OrderedDict)\n', (7820, 7858), False, 'import json\n'), ((10979, 11027), 'json.loads', 'json.loads', (['value'], {'object_pairs_hook': 'OrderedDict'}), '(value, object_pairs_hook=OrderedDict)\n', (10989, 11027), False, 'import json\n'), ((17427, 17474), 're.finditer', 're.finditer', (['self._vars_keyvalue_embedded', 'data'], {}), '(self._vars_keyvalue_embedded, data)\n', (17438, 17474), False, 'import re\n'), ((9590, 9613), 'base64.b64decode', 'base64.b64decode', (['value'], {}), '(value)\n', (9606, 9613), False, 'import base64\n'), ((14492, 14505), 'json.dumps', 'json.dumps', (['v'], {}), '(v)\n', (14502, 14505), False, 'import json\n'), ((14682, 14715), 're.sub', 're.sub', (['f""""{variable}\\""""', 'v', 'value'], {}), '(f\'"{variable}"\', v, value)\n', (14688, 14715), False, 'import re\n'), ((15248, 15274), 're.sub', 're.sub', (['variable', 'v', 'value'], {}), '(variable, v, value)\n', (15254, 15274), False, 'import re\n'), ((2400, 2423), 'base64.b64encode', 'base64.b64encode', (['value'], {}), '(value)\n', (2416, 2423), False, 'import base64\n'), ((11153, 11172), 'base64.b64decode', 'base64.b64decode', (['v'], {}), '(v)\n', (11169, 11172), False, 'import base64\n'), ((11605, 11653), 'json.loads', 'json.loads', (['value'], {'object_pairs_hook': 'OrderedDict'}), '(value, object_pairs_hook=OrderedDict)\n', (11615, 11653), False, 'import json\n'), ((17739, 17775), 're.search', 're.search', (['self._variable_parse', 'var'], {}), '(self._variable_parse, var)\n', (17748, 17775), False, 'import re\n'), ((4746, 4765), 'base64.b64encode', 'base64.b64encode', (['v'], {}), '(v)\n', (4762, 4765), False, 'import base64\n')] |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import horovod.tensorflow as hvd
import time
from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags
from utils.gpu_affinity import set_affinity
import utils.dllogger_class
from dllogger import Verbosity
from utils.create_glue_data import *
import numpy as np
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer type : adam or lamb")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss from estimator")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias, name='cls_logits')
probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities')
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')
loss = tf.reduce_mean(per_example_loss, name='cls_loss')
return (loss, per_example_loss, logits, probabilities)
def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
label_ids = tf.placeholder(tf.int32, (None), 'label_ids')
create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.amp else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
if task_name == "cola":
FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions)
FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions)
TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions)
TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions)
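        # Matthews correlation coefficient (MCC), the standard CoLA metric,
        # computed from the streaming confusion-matrix counts above.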
MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC"))
return {"MCC": (MCC, MCC_op)}
elif task_name == "mrpc":
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1])
return {
"eval_accuracy": accuracy,
"eval_f1": f1,
"eval_loss": loss,
}
else:
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
tf.compat.v1.logging.info("*** Features ***")
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
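    # For TF-TRT inference, import the frozen, TRT-converted graph instead of
    # rebuilding the model, and return the EstimatorSpec directly.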
if not is_training and FLAGS.use_trt:
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint)
(total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids},
return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'],
name='')
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"probabilities": probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
return output_spec
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
dummy_op = tf.no_op()
      # The mixed precision graph rewrite still has to be applied in eval mode when fp16 (AMP) is enabled.
if FLAGS.amp:
loss_scaler = tf.train.experimental.FixedLossScale(1)
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler)
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
dummy_op = tf.no_op()
      # The mixed precision graph rewrite still has to be applied in predict mode when fp16 (AMP) is enabled.
if FLAGS.amp:
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0))
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=probabilities)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn():
"""The actual input function."""
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
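    # A hedged sketch of the TFRecord-based alternative mentioned above
    # (assumes a pre-written TFRecord file and a matching `name_to_features`
    # spec; see file_based_input_fn_builder for the variant actually used):
    #   d = tf.data.TFRecordDataset("train.tf_record")
    #   d = d.map(lambda record: tf.parse_single_example(record, name_to_features))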
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
setup_xla_flags()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path)
if FLAGS.horovod:
hvd.init()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
set_affinity(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
if FLAGS.amp:
tf.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None,
log_step_count_steps=FLAGS.display_loss_steps,
keep_checkpoint_max=1)
if master_process:
tf.compat.v1.logging.info("***** Configuaration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
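  # LogTrainRunHook records per-step timing; its first 25 steps are ignored so
  # XLA compilation/startup warmup does not skew the throughput numbers.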
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25))
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
start_index = 0
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
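  # Each Horovod rank converts and reads its own shard of the training data;
  # the learning rate below is scaled by hvd.size() for multi-GPU training.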
model_fn = model_fn_builder(
task_name=task_name,
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(),
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
file_based_convert_examples_to_features(
train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank])
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
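    # Steady-state throughput excludes the steps skipped by LogTrainRunHook
    # (warmup) and uses the hook's overhead-free timing.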
ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.do_eval and master_process:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
batch_size=FLAGS.eval_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
time_list = eval_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
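    # time_list is sorted ascending, so only the fastest 80% of iterations
    # contribute to the overhead-free time and sentence count below.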
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
dllogging.logger.log(step=(), data={key: float(result[key])}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict and master_process:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.compat.v1.logging.info("***** Running prediction*****")
tf.compat.v1.logging.info(" Num examples = %d", len(predict_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
batch_size=FLAGS.predict_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
predict_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)]
predict_start_time = time.time()
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.io.gfile.GFile(output_predict_file, "w") as writer:
tf.compat.v1.logging.info("***** Predict results *****")
for prediction in estimator.predict(input_fn=predict_input_fn, hooks=predict_hooks,
yield_single_examples=False):
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
writer.write(output_line)
predict_time_elapsed = time.time() - predict_start_time
time_list = predict_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
predict_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.predict_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / predict_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", predict_time_elapsed,
predict_hooks[-1].count * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", predict_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on TEST SET")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
| [
"utils.utils.setup_xla_flags",
"horovod.tensorflow.init",
"tensorflow.reduce_sum",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.truncated_normal_initializer",
"tensorflow.metrics.mean",
"tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverter",
"tensorflow.io.FixedLenFeature",
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.reduce_mean",
"horovod.tensorflow.local_rank",
"numpy.mean",
"tensorflow.io.gfile.GFile",
"tensorflow.train.init_from_checkpoint",
"utils.utils.LogTrainRunHook",
"tensorflow.Session",
"tensorflow.estimator.Estimator",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.app.run",
"tensorflow.placeholder",
"tensorflow.metrics.accuracy",
"modeling.BertModel",
"tensorflow.matmul",
"horovod.tensorflow.size",
"tensorflow.trainable_variables",
"tokenization.FullTokenizer",
"modeling.get_assignment_map_from_checkpoint",
"tensorflow.metrics.true_positives",
"horovod.tensorflow.BroadcastGlobalVariablesHook",
"tensorflow.one_hot",
"tensorflow.variable_scope",
"tensorflow.train.experimental.FixedLossScale",
"optimization.create_optimizer",
"tensorflow.parse_single_example",
"optimization.LAMBOptimizer",
"tensorflow.nn.log_softmax",
"tensorflow.to_int32",
"tensorflow.metrics.false_positives",
"tf_metrics.f1",
"modeling.BertConfig.from_json_file",
"tensorflow.import_graph_def",
"time.time",
"tensorflow.enable_resource_variables",
"tensorflow.nn.bias_add",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.data.TFRecordDataset",
"tensorflow.metrics.false_negatives",
"tensorflow.estimator.RunConfig",
"tensorflow.metrics.true_negatives",
"tensorflow.no_op",
"os.path.join",
"tensorflow.io.gfile.makedirs",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.constant",
"utils.utils.LogEvalRunHook",
"tensorflow.identity",
"horovod.tensorflow.rank"
] | [((6344, 6560), 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings', 'compute_type': 'tf.float32'}), '(config=bert_config, is_training=is_training, input_ids=\n input_ids, input_mask=input_mask, token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings, compute_type=tf.float32)\n', (6362, 6560), False, 'import modeling\n'), ((7956, 7982), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (7980, 7982), True, 'import tensorflow as tf\n'), ((17775, 17792), 'utils.utils.setup_xla_flags', 'setup_xla_flags', ([], {}), '()\n', (17790, 17792), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((17796, 17857), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.INFO'], {}), '(tf.compat.v1.logging.INFO)\n', (17830, 17857), True, 'import tensorflow as tf\n'), ((18291, 18349), 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), '(FLAGS.bert_config_file)\n', (18325, 18349), False, 'import modeling\n'), ((18628, 18666), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (18648, 18666), True, 'import tensorflow as tf\n'), ((18891, 18986), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), '(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS\n .do_lower_case)\n', (18917, 18986), False, 'import tokenization\n'), ((19139, 19165), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (19163, 19165), True, 'import tensorflow as tf\n'), ((19898, 20245), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': '(FLAGS.output_dir if master_process else None)', 'session_config': 'config', 'save_checkpoints_steps': '(FLAGS.save_checkpoints_steps if master_process else None)', 'save_summary_steps': '(FLAGS.save_checkpoints_steps if master_process else None)', 'log_step_count_steps': 'FLAGS.display_loss_steps', 'keep_checkpoint_max': '(1)'}), '(model_dir=FLAGS.output_dir if master_process else\n None, session_config=config, save_checkpoints_steps=FLAGS.\n save_checkpoints_steps if master_process else None, save_summary_steps=\n FLAGS.save_checkpoints_steps if master_process else None,\n log_step_count_steps=FLAGS.display_loss_steps, keep_checkpoint_max=1)\n', (19920, 20245), True, 'import tensorflow as tf\n'), ((22110, 22170), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'model_fn', 'config': 'run_config'}), '(model_fn=model_fn, config=run_config)\n', (22132, 22170), True, 'import tensorflow as tf\n'), ((31757, 31779), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (31777, 31779), True, 'import tensorflow as tf\n'), ((4847, 4892), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (4868, 4892), True, 'import tensorflow as tf\n'), ((4914, 4959), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (4935, 4959), True, 'import tensorflow as tf\n'), ((4982, 5027), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 
'tf.int64'], {}), '([seq_length], tf.int64)\n', (5003, 5027), True, 'import tensorflow as tf\n'), ((5048, 5083), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (5069, 5083), True, 'import tensorflow as tf\n'), ((5204, 5253), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), '(record, name_to_features)\n', (5227, 5253), True, 'import tensorflow as tf\n'), ((5737, 5772), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), '(input_file)\n', (5760, 5772), True, 'import tensorflow as tf\n'), ((7132, 7157), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (7149, 7157), True, 'import tensorflow as tf\n'), ((7283, 7340), 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), '(output_layer, output_weights, transpose_b=True)\n', (7292, 7340), True, 'import tensorflow as tf\n'), ((7354, 7408), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {'name': '"""cls_logits"""'}), "(logits, output_bias, name='cls_logits')\n", (7368, 7408), True, 'import tensorflow as tf\n'), ((7429, 7485), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)', 'name': '"""cls_probabilities"""'}), "(logits, axis=-1, name='cls_probabilities')\n", (7442, 7485), True, 'import tensorflow as tf\n'), ((7502, 7536), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (7519, 7536), True, 'import tensorflow as tf\n'), ((7559, 7613), 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), '(labels, depth=num_labels, dtype=tf.float32)\n', (7569, 7613), True, 'import tensorflow as tf\n'), ((7730, 7779), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {'name': '"""cls_loss"""'}), "(per_example_loss, name='cls_loss')\n", (7744, 7779), True, 'import tensorflow as tf\n'), ((8149, 8177), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (8159, 8177), True, 'import tensorflow as tf\n'), ((8206, 8250), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'shape', '"""input_ids"""'], {}), "(tf.int32, shape, 'input_ids')\n", (8220, 8250), True, 'import tensorflow as tf\n'), ((8268, 8313), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'shape', '"""input_mask"""'], {}), "(tf.int32, shape, 'input_mask')\n", (8282, 8313), True, 'import tensorflow as tf\n'), ((8332, 8378), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'shape', '"""segment_ids"""'], {}), "(tf.int32, shape, 'segment_ids')\n", (8346, 8378), True, 'import tensorflow as tf\n'), ((8395, 8438), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'None', '"""label_ids"""'], {}), "(tf.int32, None, 'label_ids')\n", (8409, 8438), True, 'import tensorflow as tf\n'), ((8587, 8611), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (8609, 8611), True, 'import tensorflow as tf\n'), ((8663, 8730), 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (8706, 8730), False, 'import modeling\n'), ((8735, 8797), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (8764, 8797), True, 'import tensorflow as tf\n'), ((8874, 8932), 
'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""**** Trainable Variables ****"""'], {}), "('**** Trainable Variables ****')\n", (8899, 8932), True, 'import tensorflow as tf\n'), ((9552, 9818), 'tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverter', 'trt.TrtGraphConverter', ([], {'input_graph_def': 'frozen_graph', 'nodes_blacklist': 'output_node_names', 'max_workspace_size_bytes': '((4096 << 20) - 1000)', 'precision_mode': "('FP16' if FLAGS.amp else 'FP32')", 'minimum_segment_size': '(4)', 'is_dynamic_op': '(True)', 'maximum_cached_engines': '(1000)'}), "(input_graph_def=frozen_graph, nodes_blacklist=\n output_node_names, max_workspace_size_bytes=(4096 << 20) - 1000,\n precision_mode='FP16' if FLAGS.amp else 'FP32', minimum_segment_size=4,\n is_dynamic_op=True, maximum_cached_engines=1000)\n", (9573, 9818), True, 'from tensorflow.python.compiler.tensorrt import trt_convert as trt\n'), ((12163, 12208), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""*** Features ***"""'], {}), "('*** Features ***')\n", (12188, 12208), True, 'import tensorflow as tf\n'), ((12213, 12258), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""*** Features ***"""'], {}), "('*** Features ***')\n", (12238, 12258), True, 'import tensorflow as tf\n'), ((13898, 13922), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13920, 13922), True, 'import tensorflow as tf\n'), ((17951, 17961), 'horovod.tensorflow.init', 'hvd.init', ([], {}), '()\n', (17959, 17961), True, 'import horovod.tensorflow as hvd\n'), ((19193, 19256), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Multi-GPU training with TF Horovod"""'], {}), "('Multi-GPU training with TF Horovod')\n", (19218, 19256), True, 'import tensorflow as tf\n'), ((19499, 19509), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (19507, 19509), True, 'import horovod.tensorflow as hvd\n'), ((20293, 20348), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Configuaration *****"""'], {}), "('***** Configuaration *****')\n", (20318, 20348), True, 'import tensorflow as tf\n'), ((20475, 20530), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""**************************"""'], {}), "('**************************')\n", (20500, 20530), True, 'import tensorflow as tf\n'), ((20631, 20734), 'utils.utils.LogTrainRunHook', 'LogTrainRunHook', (['global_batch_size', 'hvd_rank', 'FLAGS.save_checkpoints_steps'], {'num_steps_ignore_xla': '(25)'}), '(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps,\n num_steps_ignore_xla=25)\n', (20646, 20734), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((22374, 22431), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Running training *****"""'], {}), "('***** Running training *****')\n", (22399, 22431), True, 'import tensorflow as tf\n'), ((22510, 22580), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), "(' Batch size = %d', FLAGS.train_batch_size)\n", (22535, 22580), True, 'import tensorflow as tf\n'), ((22585, 22647), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), "(' Num steps = %d', num_train_steps)\n", (22610, 22647), True, 'import tensorflow as tf\n'), ((22943, 22954), 'time.time', 'time.time', ([], {}), '()\n', (22952, 22954), False, 'import 
time\n'), ((24284, 24332), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), "(FLAGS.output_dir, 'eval.tf_record')\n", (24296, 24332), False, 'import os\n'), ((24462, 24521), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Running evaluation *****"""'], {}), "('***** Running evaluation *****')\n", (24487, 24521), True, 'import tensorflow as tf\n'), ((24599, 24668), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), "(' Batch size = %d', FLAGS.eval_batch_size)\n", (24624, 24668), True, 'import tensorflow as tf\n'), ((25015, 25026), 'time.time', 'time.time', ([], {}), '()\n', (25024, 25026), False, 'import time\n'), ((25438, 25456), 'numpy.mean', 'np.mean', (['time_list'], {}), '(time_list)\n', (25445, 25456), True, 'import numpy as np\n'), ((25814, 25872), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (25839, 25872), True, 'import tensorflow as tf\n'), ((25877, 26022), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time = %0.2f for Sentences = %d"""', 'eval_time_elapsed', '(eval_hooks[-1].count * FLAGS.eval_batch_size)'], {}), "('Total Inference Time = %0.2f for Sentences = %d',\n eval_time_elapsed, eval_hooks[-1].count * FLAGS.eval_batch_size)\n", (25902, 26022), True, 'import tensorflow as tf\n'), ((26043, 26179), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time W/O Overhead = %0.2f for Sentences = %d"""', 'eval_time_wo_overhead', 'num_sentences'], {}), "(\n 'Total Inference Time W/O Overhead = %0.2f for Sentences = %d',\n eval_time_wo_overhead, num_sentences)\n", (26068, 26179), True, 'import tensorflow as tf\n'), ((26195, 26264), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Summary Inference Statistics on EVAL set"""'], {}), "('Summary Inference Statistics on EVAL set')\n", (26220, 26264), True, 'import tensorflow as tf\n'), ((26269, 26336), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), "('Batch size = %d', FLAGS.eval_batch_size)\n", (26294, 26336), True, 'import tensorflow as tf\n'), ((26341, 26412), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Sequence Length = %d"""', 'FLAGS.max_seq_length'], {}), "('Sequence Length = %d', FLAGS.max_seq_length)\n", (26366, 26412), True, 'import tensorflow as tf\n'), ((26417, 26493), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Precision = %s"""', "('fp16' if FLAGS.amp else 'fp32')"], {}), "('Precision = %s', 'fp16' if FLAGS.amp else 'fp32')\n", (26442, 26493), True, 'import tensorflow as tf\n'), ((26498, 26585), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 50 (ms) = %0.2f"""', '(cf_50 * 1000)'], {}), "('Latency Confidence Level 50 (ms) = %0.2f', cf_50 *\n 1000)\n", (26523, 26585), True, 'import tensorflow as tf\n'), ((26586, 26673), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 90 (ms) = %0.2f"""', '(cf_90 * 1000)'], {}), "('Latency Confidence Level 90 (ms) = %0.2f', cf_90 *\n 1000)\n", (26611, 26673), True, 'import tensorflow as tf\n'), ((26674, 26761), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 95 (ms) = %0.2f"""', 
'(cf_95 * 1000)'], {}), "('Latency Confidence Level 95 (ms) = %0.2f', cf_95 *\n 1000)\n", (26699, 26761), True, 'import tensorflow as tf\n'), ((26762, 26849), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 99 (ms) = %0.2f"""', '(cf_99 * 1000)'], {}), "('Latency Confidence Level 99 (ms) = %0.2f', cf_99 *\n 1000)\n", (26787, 26849), True, 'import tensorflow as tf\n'), ((26850, 26940), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 100 (ms) = %0.2f"""', '(cf_100 * 1000)'], {}), "('Latency Confidence Level 100 (ms) = %0.2f', \n cf_100 * 1000)\n", (26875, 26940), True, 'import tensorflow as tf\n'), ((26940, 27009), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Average (ms) = %0.2f"""', '(avg * 1000)'], {}), "('Latency Average (ms) = %0.2f', avg * 1000)\n", (26965, 27009), True, 'import tensorflow as tf\n'), ((27014, 27114), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) = %0.2f"""', 'ss_sentences_per_second'], {}), "('Throughput Average (sentences/sec) = %0.2f',\n ss_sentences_per_second)\n", (27039, 27114), True, 'import tensorflow as tf\n'), ((27228, 27286), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (27253, 27286), True, 'import tensorflow as tf\n'), ((27312, 27362), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), "(FLAGS.output_dir, 'eval_results.txt')\n", (27324, 27362), False, 'import os\n'), ((27882, 27933), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), "(FLAGS.output_dir, 'predict.tf_record')\n", (27894, 27933), False, 'import os\n'), ((28148, 28206), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Running prediction*****"""'], {}), "('***** Running prediction*****')\n", (28173, 28206), True, 'import tensorflow as tf\n'), ((28287, 28359), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), "(' Batch size = %d', FLAGS.predict_batch_size)\n", (28312, 28359), True, 'import tensorflow as tf\n'), ((28730, 28741), 'time.time', 'time.time', ([], {}), '()\n', (28739, 28741), False, 'import time\n'), ((28769, 28819), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), "(FLAGS.output_dir, 'test_results.tsv')\n", (28781, 28819), False, 'import os\n'), ((29626, 29644), 'numpy.mean', 'np.mean', (['time_list'], {}), '(time_list)\n', (29633, 29644), True, 'import numpy as np\n'), ((30005, 30063), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (30030, 30063), True, 'import tensorflow as tf\n'), ((30068, 30222), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time = %0.2f for Sentences = %d"""', 'predict_time_elapsed', '(predict_hooks[-1].count * FLAGS.predict_batch_size)'], {}), "('Total Inference Time = %0.2f for Sentences = %d',\n predict_time_elapsed, predict_hooks[-1].count * FLAGS.predict_batch_size)\n", (30093, 30222), True, 'import tensorflow as tf\n'), ((30243, 30382), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time W/O Overhead = %0.2f for Sentences = %d"""', 'predict_time_wo_overhead', 
'num_sentences'], {}), "(\n 'Total Inference Time W/O Overhead = %0.2f for Sentences = %d',\n predict_time_wo_overhead, num_sentences)\n", (30268, 30382), True, 'import tensorflow as tf\n'), ((30408, 30477), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Summary Inference Statistics on TEST SET"""'], {}), "('Summary Inference Statistics on TEST SET')\n", (30433, 30477), True, 'import tensorflow as tf\n'), ((30482, 30552), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), "('Batch size = %d', FLAGS.predict_batch_size)\n", (30507, 30552), True, 'import tensorflow as tf\n'), ((30557, 30628), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Sequence Length = %d"""', 'FLAGS.max_seq_length'], {}), "('Sequence Length = %d', FLAGS.max_seq_length)\n", (30582, 30628), True, 'import tensorflow as tf\n'), ((30633, 30709), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Precision = %s"""', "('fp16' if FLAGS.amp else 'fp32')"], {}), "('Precision = %s', 'fp16' if FLAGS.amp else 'fp32')\n", (30658, 30709), True, 'import tensorflow as tf\n'), ((30714, 30801), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 50 (ms) = %0.2f"""', '(cf_50 * 1000)'], {}), "('Latency Confidence Level 50 (ms) = %0.2f', cf_50 *\n 1000)\n", (30739, 30801), True, 'import tensorflow as tf\n'), ((30802, 30889), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 90 (ms) = %0.2f"""', '(cf_90 * 1000)'], {}), "('Latency Confidence Level 90 (ms) = %0.2f', cf_90 *\n 1000)\n", (30827, 30889), True, 'import tensorflow as tf\n'), ((30890, 30977), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 95 (ms) = %0.2f"""', '(cf_95 * 1000)'], {}), "('Latency Confidence Level 95 (ms) = %0.2f', cf_95 *\n 1000)\n", (30915, 30977), True, 'import tensorflow as tf\n'), ((30978, 31065), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 99 (ms) = %0.2f"""', '(cf_99 * 1000)'], {}), "('Latency Confidence Level 99 (ms) = %0.2f', cf_99 *\n 1000)\n", (31003, 31065), True, 'import tensorflow as tf\n'), ((31066, 31156), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 100 (ms) = %0.2f"""', '(cf_100 * 1000)'], {}), "('Latency Confidence Level 100 (ms) = %0.2f', \n cf_100 * 1000)\n", (31091, 31156), True, 'import tensorflow as tf\n'), ((31156, 31225), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Average (ms) = %0.2f"""', '(avg * 1000)'], {}), "('Latency Average (ms) = %0.2f', avg * 1000)\n", (31181, 31225), True, 'import tensorflow as tf\n'), ((31230, 31330), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) = %0.2f"""', 'ss_sentences_per_second'], {}), "('Throughput Average (sentences/sec) = %0.2f',\n ss_sentences_per_second)\n", (31255, 31330), True, 'import tensorflow as tf\n'), ((31444, 31502), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (31469, 31502), True, 'import tensorflow as tf\n'), ((6973, 7017), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (7004, 7017), True, 'import tensorflow as tf\n'), ((7100, 7122), 
'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (7120, 7122), True, 'import tensorflow as tf\n'), ((7226, 7268), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), '(output_layer, keep_prob=0.9)\n', (7239, 7268), True, 'import tensorflow as tf\n'), ((7639, 7718), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)', 'name': '"""cls_per_example_loss"""'}), "(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')\n", (7652, 7718), True, 'import tensorflow as tf\n'), ((8814, 8847), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8845, 8847), True, 'import tensorflow as tf\n'), ((10147, 10192), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['"""frozen_modelTRT.pb"""', '"""wb"""'], {}), "('frozen_modelTRT.pb', 'wb')\n", (10164, 10192), True, 'import tensorflow as tf\n'), ((10741, 10789), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), '(logits, axis=-1, output_type=tf.int32)\n', (10750, 10789), True, 'import tensorflow as tf\n'), ((12306, 12394), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), "(' name = %s, shape = %s' % (name, features[name]\n .shape))\n", (12331, 12394), True, 'import tensorflow as tf\n'), ((12840, 13126), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['trt_graph'], {'input_map': "{'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids':\n segment_ids, 'label_ids': label_ids}", 'return_elements': "['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0',\n 'loss/cls_probabilities:0']", 'name': '""""""'}), "(trt_graph, input_map={'input_ids': input_ids,\n 'input_mask': input_mask, 'segment_ids': segment_ids, 'label_ids':\n label_ids}, return_elements=['loss/cls_loss:0',\n 'loss/cls_per_example_loss:0', 'loss/cls_logits:0',\n 'loss/cls_probabilities:0'], name='')\n", (12859, 13126), True, 'import tensorflow as tf\n'), ((14080, 14147), 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (14123, 14147), False, 'import modeling\n'), ((14154, 14216), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (14183, 14216), True, 'import tensorflow as tf\n'), ((14256, 14314), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""**** Trainable Variables ****"""'], {}), "('**** Trainable Variables ****')\n", (14281, 14314), True, 'import tensorflow as tf\n'), ((14679, 14853), 'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'hvd', '(False)', 'FLAGS.amp', 'FLAGS.num_accumulation_steps', 'FLAGS.optimizer_type'], {}), '(total_loss, learning_rate, num_train_steps,\n num_warmup_steps, hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps,\n FLAGS.optimizer_type)\n', (14708, 14853), False, 'import optimization\n'), ((14887, 14960), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op'}), '(mode=mode, loss=total_loss, train_op=train_op)\n', (14913, 14960), True, 'import tensorflow as tf\n'), ((19324, 19334), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (19332, 19334), 
True, 'import horovod.tensorflow as hvd\n'), ((19336, 19346), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (19344, 19346), True, 'import horovod.tensorflow as hvd\n'), ((19430, 19440), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (19438, 19440), True, 'import horovod.tensorflow as hvd\n'), ((19465, 19475), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (19473, 19475), True, 'import horovod.tensorflow as hvd\n'), ((19561, 19577), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (19575, 19577), True, 'import horovod.tensorflow as hvd\n'), ((19598, 19614), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (19612, 19614), True, 'import horovod.tensorflow as hvd\n'), ((19625, 19635), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (19633, 19635), True, 'import horovod.tensorflow as hvd\n'), ((19851, 19881), 'tensorflow.enable_resource_variables', 'tf.enable_resource_variables', ([], {}), '()\n', (19879, 19881), True, 'import tensorflow as tf\n'), ((21069, 21118), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), "(FLAGS.output_dir, 'train.tf_record')\n", (21081, 21118), False, 'import os\n'), ((23074, 23085), 'time.time', 'time.time', ([], {}), '()\n', (23083, 23085), False, 'import time\n'), ((23427, 23485), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (23452, 23485), True, 'import tensorflow as tf\n'), ((23494, 23630), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Training Time = %0.2f for Sentences = %d"""', 'train_time_elapsed', '(num_train_steps * global_batch_size)'], {}), "('Total Training Time = %0.2f for Sentences = %d',\n train_time_elapsed, num_train_steps * global_batch_size)\n", (23519, 23630), True, 'import tensorflow as tf\n'), ((23659, 23862), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Training Time W/O Overhead = %0.2f for Sentences = %d"""', 'train_time_wo_overhead', '((training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)'], {}), "(\n 'Total Training Time W/O Overhead = %0.2f for Sentences = %d',\n train_time_wo_overhead, (training_hooks[-1].count - training_hooks[-1].\n skipped) * global_batch_size)\n", (23684, 23862), True, 'import tensorflow as tf\n'), ((23881, 24001), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) with overhead = %0.2f"""', 'avg_sentences_per_second'], {}), "(\n 'Throughput Average (sentences/sec) with overhead = %0.2f',\n avg_sentences_per_second)\n", (23906, 24001), True, 'import tensorflow as tf\n'), ((24001, 24101), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) = %0.2f"""', 'ss_sentences_per_second'], {}), "('Throughput Average (sentences/sec) = %0.2f',\n ss_sentences_per_second)\n", (24026, 24101), True, 'import tensorflow as tf\n'), ((24106, 24164), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (24131, 24164), True, 'import tensorflow as tf\n'), ((24954, 24991), 'utils.utils.LogEvalRunHook', 'LogEvalRunHook', (['FLAGS.eval_batch_size'], {}), '(FLAGS.eval_batch_size)\n', (24968, 24991), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((25126, 25137), 'time.time', 'time.time', ([], 
{}), '()\n', (25135, 25137), False, 'import time\n'), ((27372, 27412), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['output_eval_file', '"""w"""'], {}), "(output_eval_file, 'w')\n", (27389, 27412), True, 'import tensorflow as tf\n'), ((27430, 27483), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Eval results *****"""'], {}), "('***** Eval results *****')\n", (27455, 27483), True, 'import tensorflow as tf\n'), ((28663, 28703), 'utils.utils.LogEvalRunHook', 'LogEvalRunHook', (['FLAGS.predict_batch_size'], {}), '(FLAGS.predict_batch_size)\n', (28677, 28703), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((28829, 28872), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['output_predict_file', '"""w"""'], {}), "(output_predict_file, 'w')\n", (28846, 28872), True, 'import tensorflow as tf\n'), ((28892, 28948), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Predict results *****"""'], {}), "('***** Predict results *****')\n", (28917, 28948), True, 'import tensorflow as tf\n'), ((29302, 29313), 'time.time', 'time.time', ([], {}), '()\n', (29311, 29313), False, 'import time\n'), ((5470, 5484), 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), '(t)\n', (5481, 5484), True, 'import tensorflow as tf\n'), ((9141, 9233), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), "(' name = %s, shape = %s%s', var.name, var.shape,\n init_string)\n", (9166, 9233), True, 'import tensorflow as tf\n'), ((10846, 10915), 'tensorflow.metrics.false_negatives', 'tf.metrics.false_negatives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (10872, 10915), True, 'import tensorflow as tf\n'), ((10940, 11009), 'tensorflow.metrics.false_positives', 'tf.metrics.false_positives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (10966, 11009), True, 'import tensorflow as tf\n'), ((11034, 11102), 'tensorflow.metrics.true_positives', 'tf.metrics.true_positives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11059, 11102), True, 'import tensorflow as tf\n'), ((11127, 11195), 'tensorflow.metrics.true_negatives', 'tf.metrics.true_negatives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11152, 11195), True, 'import tensorflow as tf\n'), ((13290, 13352), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (13316, 13352), True, 'import tensorflow as tf\n'), ((14478, 14570), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), "(' name = %s, shape = %s%s', var.name, var.shape,\n init_string)\n", (14503, 14570), True, 'import tensorflow as tf\n'), ((15054, 15064), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15062, 15064), True, 'import tensorflow as tf\n'), ((15472, 15564), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=total_loss, eval_metric_ops=\n eval_metric_ops)\n', (15498, 15564), True, 'import tensorflow as tf\n'), ((15618, 15628), 
'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15626, 15628), True, 'import tensorflow as tf\n'), ((15890, 15954), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'probabilities'}), '(mode=mode, predictions=probabilities)\n', (15916, 15954), True, 'import tensorflow as tf\n'), ((16970, 17046), 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), '(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32)\n', (16981, 17046), True, 'import tensorflow as tf\n'), ((17115, 17192), 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), '(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32)\n', (17126, 17192), True, 'import tensorflow as tf\n'), ((17278, 17356), 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), '(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32)\n', (17289, 17356), True, 'import tensorflow as tf\n'), ((17440, 17504), 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), '(all_label_ids, shape=[num_examples], dtype=tf.int32)\n', (17451, 17504), True, 'import tensorflow as tf\n'), ((19673, 19708), 'horovod.tensorflow.BroadcastGlobalVariablesHook', 'hvd.BroadcastGlobalVariablesHook', (['(0)'], {}), '(0)\n', (19705, 19708), True, 'import horovod.tensorflow as hvd\n'), ((21309, 21319), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21317, 21319), True, 'import horovod.tensorflow as hvd\n'), ((21360, 21370), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21368, 21370), True, 'import horovod.tensorflow as hvd\n'), ((21380, 21390), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (21388, 21390), True, 'import horovod.tensorflow as hvd\n'), ((5831, 5841), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (5839, 5841), True, 'import horovod.tensorflow as hvd\n'), ((5843, 5853), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (5851, 5853), True, 'import horovod.tensorflow as hvd\n'), ((11350, 11378), 'tensorflow.identity', 'tf.identity', (['MCC'], {'name': '"""MCC"""'}), "(MCC, name='MCC')\n", (11361, 11378), True, 'import tensorflow as tf\n'), ((11479, 11541), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11498, 11541), True, 'import tensorflow as tf\n'), ((11578, 11618), 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss'}), '(values=per_example_loss)\n', (11593, 11618), True, 'import tensorflow as tf\n'), ((11636, 11728), 'tf_metrics.f1', 'tf_metrics.f1', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'num_classes': '(2)', 'pos_indices': '[1]'}), '(labels=label_ids, predictions=predictions, num_classes=2,\n pos_indices=[1])\n', (11649, 11728), False, 'import tf_metrics\n'), ((11906, 11968), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11925, 11968), True, 'import tensorflow as tf\n'), ((12005, 12045), 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss'}), '(values=per_example_loss)\n', (12020, 12045), True, 'import tensorflow as tf\n'), ((13522, 13614), 
'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=total_loss, eval_metric_ops=\n eval_metric_ops)\n', (13548, 13614), True, 'import tensorflow as tf\n'), ((14002, 14012), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (14010, 14012), True, 'import horovod.tensorflow as hvd\n'), ((15190, 15229), 'tensorflow.train.experimental.FixedLossScale', 'tf.train.experimental.FixedLossScale', (['(1)'], {}), '(1)\n', (15226, 15229), True, 'import tensorflow as tf\n'), ((17572, 17582), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (17580, 17582), True, 'import horovod.tensorflow as hvd\n'), ((17584, 17594), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (17592, 17594), True, 'import horovod.tensorflow as hvd\n'), ((21426, 21436), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (21434, 21436), True, 'import horovod.tensorflow as hvd\n'), ((21921, 21931), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21929, 21931), True, 'import horovod.tensorflow as hvd\n'), ((15321, 15366), 'optimization.LAMBOptimizer', 'optimization.LAMBOptimizer', ([], {'learning_rate': '(0.0)'}), '(learning_rate=0.0)\n', (15347, 15366), False, 'import optimization\n'), ((15823, 15868), 'optimization.LAMBOptimizer', 'optimization.LAMBOptimizer', ([], {'learning_rate': '(0.0)'}), '(learning_rate=0.0)\n', (15849, 15868), False, 'import optimization\n'), ((21243, 21253), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21251, 21253), True, 'import horovod.tensorflow as hvd\n'), ((21559, 21569), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (21567, 21569), True, 'import horovod.tensorflow as hvd\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test cases for subject-replicator"""
import sys
from subject.tests import functional
from subject.tests.utils import execute
class TestGlanceReplicator(functional.FunctionalTest):
"""Functional tests for subject-replicator"""
def test_compare(self):
# Test for issue: https://bugs.launchpad.net/glance/+bug/1598928
cmd = ('%s -m subject.cmd.replicator '
'compare az1:9292 az2:9292 --debug' %
(sys.executable,))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertIn(
'Request: GET http://az1:9292/v1/subjects/detail?is_public=None',
err
)
| [
"subject.tests.utils.execute"
] | [((1086, 1117), 'subject.tests.utils.execute', 'execute', (['cmd'], {'raise_error': '(False)'}), '(cmd, raise_error=False)\n', (1093, 1117), False, 'from subject.tests.utils import execute\n')] |
from devito import VectorTimeFunction, TimeFunction, NODE
from devito.tools import memoized_meth
from examples.seismic import PointSource
from examples.seismic.viscoacoustic.operators import (ForwardOperator, AdjointOperator)
class ViscoacousticWaveSolver(object):
"""
Solver object that provides operators for seismic inversion problems
and encapsulates the time and space discretization for a given problem
setup.
Parameters
----------
model : Model
Physical model with domain parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Order of the spatial stencil discretisation. Defaults to 4.
kernel : selects a visco-acoustic equation from the options below:
'sls' (Standard Linear Solid) :
1st order - Blanch and Symes (1995) / Dutta and Schuster (2014)
viscoacoustic equation
2nd order - Bai et al. (2014) viscoacoustic equation
'ren' - Ren et al. (2014) viscoacoustic equation
'deng_mcmechan' - Deng and McMechan (2007) viscoacoustic equation
Defaults to 'sls' 2nd order.
"""
def __init__(self, model, geometry, space_order=4, kernel='sls', time_order=2,
**kwargs):
self.model = model
self.model._initialize_bcs(bcs="mask")
self.geometry = geometry
self.space_order = space_order
self.kernel = kernel
self.time_order = time_order
self._kwargs = kwargs
@property
def dt(self):
return self.model.critical_dt
@memoized_meth
def op_fwd(self, save=None):
"""Cached operator for forward runs with buffered wavefield"""
return ForwardOperator(self.model, save=save, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
time_order=self.time_order, **self._kwargs)
@memoized_meth
def op_adj(self):
"""Cached operator for adjoint runs"""
return AdjointOperator(self.model, save=None, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
time_order=self.time_order, **self._kwargs)
def forward(self, src=None, rec=None, v=None, r=None, p=None, qp=None, b=None,
vp=None, save=None, **kwargs):
"""
Forward modelling function that creates the necessary
data objects for running a forward modelling operator.
Parameters
----------
src : SparseTimeFunction or array_like, optional
Time series data for the injected source term.
rec : SparseTimeFunction or array_like, optional
The interpolated receiver data.
v : VectorTimeFunction, optional
The computed particle velocity.
r : TimeFunction, optional
The computed memory variable.
p : TimeFunction, optional
Stores the computed wavefield.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
vp : Function or float, optional
The time-constant velocity.
save : bool, optional
Whether or not to save the entire (unrolled) wavefield.
Returns
-------
Receiver, wavefield and performance summary
"""
# Source term is read-only, so re-use the default
src = src or self.geometry.src
# Create a new receiver object to store the result
rec = rec or self.geometry.rec
# Create all the fields v, p, r
save_t = src.nt if save else None
if self.time_order == 1:
v = v or VectorTimeFunction(name="v", grid=self.model.grid, save=save_t,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in v})
# Create the forward wavefield if not provided
p = p or TimeFunction(name="p", grid=self.model.grid, save=save_t,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
r = r or TimeFunction(name="r", grid=self.model.grid, save=save_t,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Pick physical parameters from model unless explicitly provided
b = b or self.model.b
qp = qp or self.model.qp
# Pick vp from model unless explicitly provided
vp = vp or self.model.vp
if self.kernel == 'sls':
# Execute operator and return wavefield and receiver data
# With Memory variable
summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, r=r,
p=p, b=b, vp=vp,
dt=kwargs.pop('dt', self.dt), **kwargs)
else:
# Execute operator and return wavefield and receiver data
# Without Memory variable
summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, p=p,
b=b, vp=vp,
dt=kwargs.pop('dt', self.dt), **kwargs)
return rec, p, v, summary
def adjoint(self, rec, srca=None, va=None, pa=None, vp=None, qp=None, b=None, r=None,
**kwargs):
"""
Adjoint modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
rec : SparseTimeFunction or array-like
The receiver data. Please note that
these act as the source term in the adjoint run.
srca : SparseTimeFunction or array-like
            The resulting data, interpolated at the
            original source location.
va : VectorTimeFunction, optional
The computed particle velocity.
pa : TimeFunction, optional
Stores the computed wavefield.
vp : Function or float, optional
The time-constant velocity.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
r : TimeFunction, optional
The computed memory variable.
Returns
-------
Adjoint source, wavefield and performance summary.
"""
# Create a new adjoint source and receiver symbol
srca = srca or PointSource(name='srca', grid=self.model.grid,
time_range=self.geometry.time_axis,
coordinates=self.geometry.src_positions)
if self.time_order == 1:
va = va or VectorTimeFunction(name="va", grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in va})
pa = pa or TimeFunction(name="pa", grid=self.model.grid,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
r = r or TimeFunction(name="r", grid=self.model.grid, time_order=self.time_order,
space_order=self.space_order, staggered=NODE)
b = b or self.model.b
qp = qp or self.model.qp
# Pick vp from model unless explicitly provided
vp = vp or self.model.vp
# Execute operator and return wavefield and receiver data
if self.kernel == 'sls':
# Execute operator and return wavefield and receiver data
# With Memory variable
summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, r=r, b=b, vp=vp,
qp=qp, dt=kwargs.pop('dt', self.dt), **kwargs)
else:
summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, vp=vp, b=b, qp=qp,
dt=kwargs.pop('dt', self.dt), **kwargs)
return srca, pa, va, summary
| [
"examples.seismic.PointSource",
"examples.seismic.viscoacoustic.operators.ForwardOperator",
"devito.TimeFunction",
"devito.VectorTimeFunction",
"examples.seismic.viscoacoustic.operators.AdjointOperator"
] | [((1869, 2035), 'examples.seismic.viscoacoustic.operators.ForwardOperator', 'ForwardOperator', (['self.model'], {'save': 'save', 'geometry': 'self.geometry', 'space_order': 'self.space_order', 'kernel': 'self.kernel', 'time_order': 'self.time_order'}), '(self.model, save=save, geometry=self.geometry, space_order=\n self.space_order, kernel=self.kernel, time_order=self.time_order, **\n self._kwargs)\n', (1884, 2035), False, 'from examples.seismic.viscoacoustic.operators import ForwardOperator, AdjointOperator\n'), ((2192, 2358), 'examples.seismic.viscoacoustic.operators.AdjointOperator', 'AdjointOperator', (['self.model'], {'save': 'None', 'geometry': 'self.geometry', 'space_order': 'self.space_order', 'kernel': 'self.kernel', 'time_order': 'self.time_order'}), '(self.model, save=None, geometry=self.geometry, space_order=\n self.space_order, kernel=self.kernel, time_order=self.time_order, **\n self._kwargs)\n', (2207, 2358), False, 'from examples.seismic.viscoacoustic.operators import ForwardOperator, AdjointOperator\n'), ((4259, 4395), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""p"""', 'grid': 'self.model.grid', 'save': 'save_t', 'time_order': 'self.time_order', 'space_order': 'self.space_order', 'staggered': 'NODE'}), "(name='p', grid=self.model.grid, save=save_t, time_order=self.\n time_order, space_order=self.space_order, staggered=NODE)\n", (4271, 4395), False, 'from devito import VectorTimeFunction, TimeFunction, NODE\n'), ((4496, 4632), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""r"""', 'grid': 'self.model.grid', 'save': 'save_t', 'time_order': 'self.time_order', 'space_order': 'self.space_order', 'staggered': 'NODE'}), "(name='r', grid=self.model.grid, save=save_t, time_order=self.\n time_order, space_order=self.space_order, staggered=NODE)\n", (4508, 4632), False, 'from devito import VectorTimeFunction, TimeFunction, NODE\n'), ((6908, 7036), 'examples.seismic.PointSource', 'PointSource', ([], {'name': '"""srca"""', 'grid': 'self.model.grid', 'time_range': 'self.geometry.time_axis', 'coordinates': 'self.geometry.src_positions'}), "(name='srca', grid=self.model.grid, time_range=self.geometry.\n time_axis, coordinates=self.geometry.src_positions)\n", (6919, 7036), False, 'from examples.seismic import PointSource\n'), ((7424, 7547), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""pa"""', 'grid': 'self.model.grid', 'time_order': 'self.time_order', 'space_order': 'self.space_order', 'staggered': 'NODE'}), "(name='pa', grid=self.model.grid, time_order=self.time_order,\n space_order=self.space_order, staggered=NODE)\n", (7436, 7547), False, 'from devito import VectorTimeFunction, TimeFunction, NODE\n'), ((7653, 7775), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""r"""', 'grid': 'self.model.grid', 'time_order': 'self.time_order', 'space_order': 'self.space_order', 'staggered': 'NODE'}), "(name='r', grid=self.model.grid, time_order=self.time_order,\n space_order=self.space_order, staggered=NODE)\n", (7665, 7775), False, 'from devito import VectorTimeFunction, TimeFunction, NODE\n'), ((3934, 4060), 'devito.VectorTimeFunction', 'VectorTimeFunction', ([], {'name': '"""v"""', 'grid': 'self.model.grid', 'save': 'save_t', 'time_order': 'self.time_order', 'space_order': 'self.space_order'}), "(name='v', grid=self.model.grid, save=save_t, time_order=\n self.time_order, space_order=self.space_order)\n", (3952, 4060), False, 'from devito import VectorTimeFunction, TimeFunction, NODE\n'), ((7159, 7273), 'devito.VectorTimeFunction', 'VectorTimeFunction', 
([], {'name': '"""va"""', 'grid': 'self.model.grid', 'time_order': 'self.time_order', 'space_order': 'self.space_order'}), "(name='va', grid=self.model.grid, time_order=self.\n time_order, space_order=self.space_order)\n", (7177, 7273), False, 'from devito import VectorTimeFunction, TimeFunction, NODE\n')] |
import pandas as pd
import os
from tqdm import tqdm
from collections import defaultdict
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
dataPath = "data/static"
itemSetList = []
def loadDataSet():
with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
cates = line.split(' ')
itemSetList.append(list(map(int, cates)))
def myApriori():
te = TransactionEncoder()
te_ary = te.fit(itemSetList).transform(itemSetList)
df = pd.DataFrame(te_ary, columns=te.columns_)
return df
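# Illustrative note on TransactionEncoder (hypothetical category ids): an input
# such as [[1, 2], [2, 3]] is one-hot encoded into a boolean DataFrame with
# columns [1, 2, 3] and rows [True, True, False] / [False, True, True], which is
# the layout apriori() expects below.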
def dataInit():
if os.path.exists(os.path.join(dataPath, "aprioriData.csv")):
return
df = pd.read_csv("data/static/static.csv")
user_category = defaultdict(set)
for idx, row in tqdm(df.iterrows(), total=df.shape[0], desc="category data generate"):
user_category[row['USER_ID']].add(row['CATEGORY_ID'])
with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f:
for k, v in tqdm(user_category.items()):
f.write(' '.join(sorted(list(map(str, v))))+'\n')
if __name__ == '__main__':
dataInit()
loadDataSet()
df = myApriori()
frequent_itemsets = apriori(df, min_support=0.0035, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
print(frequent_itemsets[(frequent_itemsets['length'] >= 2)])
| [
"pandas.read_csv",
"mlxtend.frequent_patterns.apriori",
"os.path.join",
"mlxtend.preprocessing.TransactionEncoder",
"collections.defaultdict",
"pandas.DataFrame"
] | [((514, 534), 'mlxtend.preprocessing.TransactionEncoder', 'TransactionEncoder', ([], {}), '()\n', (532, 534), False, 'from mlxtend.preprocessing import TransactionEncoder\n'), ((600, 641), 'pandas.DataFrame', 'pd.DataFrame', (['te_ary'], {'columns': 'te.columns_'}), '(te_ary, columns=te.columns_)\n', (612, 641), True, 'import pandas as pd\n'), ((764, 801), 'pandas.read_csv', 'pd.read_csv', (['"""data/static/static.csv"""'], {}), "('data/static/static.csv')\n", (775, 801), True, 'import pandas as pd\n'), ((822, 838), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (833, 838), False, 'from collections import defaultdict\n'), ((1279, 1329), 'mlxtend.frequent_patterns.apriori', 'apriori', (['df'], {'min_support': '(0.0035)', 'use_colnames': '(True)'}), '(df, min_support=0.0035, use_colnames=True)\n', (1286, 1329), False, 'from mlxtend.frequent_patterns import apriori\n'), ((696, 737), 'os.path.join', 'os.path.join', (['dataPath', '"""aprioriData.csv"""'], {}), "(dataPath, 'aprioriData.csv')\n", (708, 737), False, 'import os\n'), ((265, 306), 'os.path.join', 'os.path.join', (['dataPath', '"""aprioriData.csv"""'], {}), "(dataPath, 'aprioriData.csv')\n", (277, 306), False, 'import os\n'), ((1006, 1047), 'os.path.join', 'os.path.join', (['dataPath', '"""aprioriData.csv"""'], {}), "(dataPath, 'aprioriData.csv')\n", (1018, 1047), False, 'import os\n')] |
from kajiki import i18n
from flask import request
from flask_kajiki import render_template
# N. B. setting i18n.gettext would affect tests from all modules,
# so we check for a request path that only functions from this module could set
def gettext(s):
if request.path == '/test_i18n':
return s.upper()
return s
i18n.gettext = gettext
def test_does_translations(app):
"""Callback interface is able to inject Translator filter"""
with app.test_request_context(path='/test_i18n'):
rendered = render_template('i18n.html')
# TODO DOCTYPE; see also render_args
expected = '<p>HELLO!</p>'
assert rendered == expected
| [
"flask_kajiki.render_template"
] | [((525, 553), 'flask_kajiki.render_template', 'render_template', (['"""i18n.html"""'], {}), "('i18n.html')\n", (540, 553), False, 'from flask_kajiki import render_template\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
import json
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from args import parse_args
import paddlenlp as ppnlp
from paddlenlp.data import Pad, Stack, Tuple, Dict
from paddlenlp.transformers import BertForQuestionAnswering, BertTokenizer
from paddlenlp.transformers import ErnieForQuestionAnswering, ErnieTokenizer
from paddlenlp.transformers import ErnieGramForQuestionAnswering, ErnieGramTokenizer
from paddlenlp.transformers import RobertaForQuestionAnswering, RobertaTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics.squad import squad_evaluate, compute_prediction
from paddlenlp.datasets import load_dataset
MODEL_CLASSES = {
"bert": (BertForQuestionAnswering, BertTokenizer),
"ernie": (ErnieForQuestionAnswering, ErnieTokenizer),
"ernie_gram": (ErnieGramForQuestionAnswering, ErnieGramTokenizer),
"roberta": (RobertaForQuestionAnswering, RobertaTokenizer)
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
@paddle.no_grad()
def evaluate(model, data_loader, args):
model.eval()
all_start_logits = []
all_end_logits = []
tic_eval = time.time()
for batch in data_loader:
input_ids, token_type_ids = batch
start_logits_tensor, end_logits_tensor = model(input_ids,
token_type_ids)
for idx in range(start_logits_tensor.shape[0]):
if len(all_start_logits) % 1000 == 0 and len(all_start_logits):
print("Processing example: %d" % len(all_start_logits))
print('time per 1000:', time.time() - tic_eval)
tic_eval = time.time()
all_start_logits.append(start_logits_tensor.numpy()[idx])
all_end_logits.append(end_logits_tensor.numpy()[idx])
all_predictions, _, _ = compute_prediction(
data_loader.dataset.data, data_loader.dataset.new_data,
(all_start_logits, all_end_logits), False, args.n_best_size,
args.max_answer_length)
# Can also write all_nbest_json and scores_diff_json files if needed
with open('prediction.json', "w", encoding='utf-8') as writer:
writer.write(
json.dumps(
all_predictions, ensure_ascii=False, indent=4) + "\n")
squad_evaluate(
examples=data_loader.dataset.data,
preds=all_predictions,
is_whitespace_splited=False)
model.train()
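# Note on the artifacts produced above: all_predictions maps example ids to answer
# strings (hypothetical entry: {"QUERY_0": "some answer"}), which is what gets
# dumped to prediction.json, and squad_evaluate prints exact-match/F1 metrics.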
class CrossEntropyLossForSQuAD(paddle.nn.Layer):
def __init__(self):
super(CrossEntropyLossForSQuAD, self).__init__()
def forward(self, y, label):
start_logits, end_logits = y
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
start_loss = paddle.nn.functional.cross_entropy(
input=start_logits, label=start_position)
end_loss = paddle.nn.functional.cross_entropy(
input=end_logits, label=end_position)
loss = (start_loss + end_loss) / 2
return loss
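# Shape sketch for the loss above (assumed sizes): with batch size B and sequence
# length L, start_logits/end_logits are [B, L] and the position labels are [B];
# unsqueeze adds the trailing axis paddle's cross_entropy expects, and the start
# and end losses are averaged:
#
#     criterion = CrossEntropyLossForSQuAD()
#     loss = criterion((start_logits, end_logits), (start_positions, end_positions))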
def run(args):
paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
rank = paddle.distributed.get_rank()
task_name = args.task_name.lower()
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
set_seed(args)
if rank == 0:
if os.path.exists(args.model_name_or_path):
print("init checkpoint from %s" % args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
# NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses ArrowTable as its basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# Let's label those examples!
for i, tokenized_example in enumerate(tokenized_examples):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_example["input_ids"]
cls_index = input_ids.index(tokenizer.cls_token_id)
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offsets = tokenized_example['offset_mapping']
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_example['token_type_ids']
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
answers = examples[sample_index]['answers']
answer_starts = examples[sample_index]['answer_starts']
# Start/end character index of the answer in the text.
start_char = answer_starts[0]
end_char = start_char + len(answers[0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Minus one more to reach actual text
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and
offsets[token_end_index][1] >= end_char):
tokenized_examples[i]["start_positions"] = cls_index
tokenized_examples[i]["end_positions"] = cls_index
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[
token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples[i]["start_positions"] = token_start_index - 1
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples[i]["end_positions"] = token_end_index + 1
return tokenized_examples
if args.do_train:
if args.train_file:
train_ds = load_dataset(task_name, data_files=args.train_file)
else:
train_ds = load_dataset(task_name, splits='train')
train_ds.map(prepare_train_features, batched=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=args.batch_size, shuffle=True)
train_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
"start_positions": Stack(dtype="int64"),
"end_positions": Stack(dtype="int64")
}): fn(samples)
train_data_loader = DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=train_batchify_fn,
return_list=True)
num_training_steps = args.max_steps if args.max_steps > 0 else len(
train_data_loader) * args.num_train_epochs
num_train_epochs = math.ceil(num_training_steps /
len(train_data_loader))
lr_scheduler = LinearDecayWithWarmup(
args.learning_rate, num_training_steps, args.warmup_proportion)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
criterion = CrossEntropyLossForSQuAD()
global_step = 0
tic_train = time.time()
for epoch in range(num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
input_ids, token_type_ids, start_positions, end_positions = batch
logits = model(
input_ids=input_ids, token_type_ids=token_type_ids)
loss = criterion(logits, (start_positions, end_positions))
if global_step % args.logging_steps == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch + 1, step + 1, loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if global_step % args.save_steps == 0 or global_step == num_training_steps:
if rank == 0:
output_dir = os.path.join(args.output_dir,
"model_%d" % global_step)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# need better way to get inner model of DataParallel
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print('Saving checkpoint to:', output_dir)
if global_step == num_training_steps:
break
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
# NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses ArrowTable as its basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# For validation, there is no need to compute start and end positions
for i, tokenized_example in enumerate(tokenized_examples):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_example['token_type_ids']
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
tokenized_examples[i]["example_id"] = examples[sample_index]['id']
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples[i]["offset_mapping"] = [
(o if sequence_ids[k] == 1 else None)
for k, o in enumerate(tokenized_example["offset_mapping"])
]
return tokenized_examples
if args.do_predict and rank == 0:
if args.predict_file:
dev_ds = load_dataset(task_name, data_files=args.predict_file)
else:
dev_ds = load_dataset(task_name, splits='dev')
dev_ds.map(prepare_validation_features, batched=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=args.batch_size, shuffle=False)
dev_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id)
}): fn(samples)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
return_list=True)
evaluate(model, dev_data_loader, args)
if __name__ == "__main__":
args = parse_args()
run(args)
| [
"paddlenlp.data.Pad",
"paddlenlp.metrics.squad.compute_prediction",
"paddle.nn.functional.cross_entropy",
"paddle.seed",
"os.path.exists",
"paddle.no_grad",
"json.dumps",
"paddle.distributed.init_parallel_env",
"numpy.random.seed",
"paddle.io.DataLoader",
"paddle.set_device",
"paddle.io.BatchSampler",
"paddle.io.DistributedBatchSampler",
"paddle.distributed.get_world_size",
"paddlenlp.metrics.squad.squad_evaluate",
"args.parse_args",
"time.time",
"paddlenlp.transformers.LinearDecayWithWarmup",
"paddle.distributed.get_rank",
"os.makedirs",
"os.path.join",
"paddle.DataParallel",
"random.seed",
"paddlenlp.datasets.load_dataset",
"paddle.unsqueeze",
"paddlenlp.data.Stack"
] | [((1787, 1803), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (1801, 1803), False, 'import paddle\n'), ((1704, 1726), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1715, 1726), False, 'import random\n'), ((1731, 1756), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1745, 1756), True, 'import numpy as np\n'), ((1761, 1783), 'paddle.seed', 'paddle.seed', (['args.seed'], {}), '(args.seed)\n', (1772, 1783), False, 'import paddle\n'), ((1927, 1938), 'time.time', 'time.time', ([], {}), '()\n', (1936, 1938), False, 'import time\n'), ((2623, 2791), 'paddlenlp.metrics.squad.compute_prediction', 'compute_prediction', (['data_loader.dataset.data', 'data_loader.dataset.new_data', '(all_start_logits, all_end_logits)', '(False)', 'args.n_best_size', 'args.max_answer_length'], {}), '(data_loader.dataset.data, data_loader.dataset.new_data,\n (all_start_logits, all_end_logits), False, args.n_best_size, args.\n max_answer_length)\n', (2641, 2791), False, 'from paddlenlp.metrics.squad import squad_evaluate, compute_prediction\n'), ((3071, 3176), 'paddlenlp.metrics.squad.squad_evaluate', 'squad_evaluate', ([], {'examples': 'data_loader.dataset.data', 'preds': 'all_predictions', 'is_whitespace_splited': '(False)'}), '(examples=data_loader.dataset.data, preds=all_predictions,\n is_whitespace_splited=False)\n', (3085, 3176), False, 'from paddlenlp.metrics.squad import squad_evaluate, compute_prediction\n'), ((3895, 3925), 'paddle.set_device', 'paddle.set_device', (['args.device'], {}), '(args.device)\n', (3912, 3925), False, 'import paddle\n'), ((4032, 4061), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (4059, 4061), False, 'import paddle\n'), ((14771, 14783), 'args.parse_args', 'parse_args', ([], {}), '()\n', (14781, 14783), False, 'from args import parse_args\n'), ((3490, 3531), 'paddle.unsqueeze', 'paddle.unsqueeze', (['start_position'], {'axis': '(-1)'}), '(start_position, axis=-1)\n', (3506, 3531), False, 'import paddle\n'), ((3555, 3594), 'paddle.unsqueeze', 'paddle.unsqueeze', (['end_position'], {'axis': '(-1)'}), '(end_position, axis=-1)\n', (3571, 3594), False, 'import paddle\n'), ((3616, 3692), 'paddle.nn.functional.cross_entropy', 'paddle.nn.functional.cross_entropy', ([], {'input': 'start_logits', 'label': 'start_position'}), '(input=start_logits, label=start_position)\n', (3650, 3692), False, 'import paddle\n'), ((3725, 3797), 'paddle.nn.functional.cross_entropy', 'paddle.nn.functional.cross_entropy', ([], {'input': 'end_logits', 'label': 'end_position'}), '(input=end_logits, label=end_position)\n', (3759, 3797), False, 'import paddle\n'), ((3933, 3968), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (3966, 3968), False, 'import paddle\n'), ((3982, 4020), 'paddle.distributed.init_parallel_env', 'paddle.distributed.init_parallel_env', ([], {}), '()\n', (4018, 4020), False, 'import paddle\n'), ((4335, 4374), 'os.path.exists', 'os.path.exists', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4349, 4374), False, 'import os\n'), ((4521, 4556), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (4554, 4556), False, 'import paddle\n'), ((4578, 4604), 'paddle.DataParallel', 'paddle.DataParallel', (['model'], {}), '(model)\n', (4597, 4604), False, 'import paddle\n'), ((8555, 8644), 'paddle.io.DistributedBatchSampler', 'paddle.io.DistributedBatchSampler', (['train_ds'], {'batch_size': 'args.batch_size', 
'shuffle': '(True)'}), '(train_ds, batch_size=args.batch_size,\n shuffle=True)\n', (8588, 8644), False, 'import paddle\n'), ((9014, 9130), 'paddle.io.DataLoader', 'DataLoader', ([], {'dataset': 'train_ds', 'batch_sampler': 'train_batch_sampler', 'collate_fn': 'train_batchify_fn', 'return_list': '(True)'}), '(dataset=train_ds, batch_sampler=train_batch_sampler, collate_fn=\n train_batchify_fn, return_list=True)\n', (9024, 9130), False, 'from paddle.io import DataLoader\n'), ((9450, 9540), 'paddlenlp.transformers.LinearDecayWithWarmup', 'LinearDecayWithWarmup', (['args.learning_rate', 'num_training_steps', 'args.warmup_proportion'], {}), '(args.learning_rate, num_training_steps, args.\n warmup_proportion)\n', (9471, 9540), False, 'from paddlenlp.transformers import LinearDecayWithWarmup\n'), ((10190, 10201), 'time.time', 'time.time', ([], {}), '()\n', (10199, 10201), False, 'import time\n'), ((14188, 14261), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['dev_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dev_ds, batch_size=args.batch_size, shuffle=False)\n', (14210, 14261), False, 'import paddle\n'), ((14528, 14638), 'paddle.io.DataLoader', 'DataLoader', ([], {'dataset': 'dev_ds', 'batch_sampler': 'dev_batch_sampler', 'collate_fn': 'dev_batchify_fn', 'return_list': '(True)'}), '(dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=\n dev_batchify_fn, return_list=True)\n', (14538, 14638), False, 'from paddle.io import DataLoader\n'), ((8337, 8388), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'data_files': 'args.train_file'}), '(task_name, data_files=args.train_file)\n', (8349, 8388), False, 'from paddlenlp.datasets import load_dataset\n'), ((8426, 8465), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'splits': '"""train"""'}), "(task_name, splits='train')\n", (8438, 8465), False, 'from paddlenlp.datasets import load_dataset\n'), ((13970, 14023), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'data_files': 'args.predict_file'}), '(task_name, data_files=args.predict_file)\n', (13982, 14023), False, 'from paddlenlp.datasets import load_dataset\n'), ((14059, 14096), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'splits': '"""dev"""'}), "(task_name, splits='dev')\n", (14071, 14096), False, 'from paddlenlp.datasets import load_dataset\n'), ((2445, 2456), 'time.time', 'time.time', ([], {}), '()\n', (2454, 2456), False, 'import time\n'), ((2983, 3040), 'json.dumps', 'json.dumps', (['all_predictions'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(all_predictions, ensure_ascii=False, indent=4)\n', (2993, 3040), False, 'import json\n'), ((10957, 10968), 'time.time', 'time.time', ([], {}), '()\n', (10966, 10968), False, 'import time\n'), ((2394, 2405), 'time.time', 'time.time', ([], {}), '()\n', (2403, 2405), False, 'import time\n'), ((8733, 8776), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (8736, 8776), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((8808, 8856), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id'}), '(axis=0, pad_val=tokenizer.pad_token_type_id)\n', (8811, 8856), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((8889, 8909), 'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (8894, 8909), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((8940, 8960), 
'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (8945, 8960), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((11273, 11328), 'os.path.join', 'os.path.join', (['args.output_dir', "('model_%d' % global_step)"], {}), "(args.output_dir, 'model_%d' % global_step)\n", (11285, 11328), False, 'import os\n'), ((14353, 14396), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (14356, 14396), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((14428, 14476), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id'}), '(axis=0, pad_val=tokenizer.pad_token_type_id)\n', (14431, 14476), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((11410, 11436), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (11424, 11436), False, 'import os\n'), ((11466, 11489), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (11477, 11489), False, 'import os\n'), ((10898, 10909), 'time.time', 'time.time', ([], {}), '()\n', (10907, 10909), False, 'import time\n')] |
# Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Weight matrix W initialized uniformly in the range 0 - 0.01 (like the way Andrew Ng did with *0.01)
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Feature vector for next state representation
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Entropy loss
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
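# The training loop below performs the one-step Q-learning (Bellman) backup: for
# an experience (s, a, r, s1) the regression target is
#     target = r + y * max_a' Q(s1, a')
# and W is fitted to that target through the squared loss defined above
# (y is the discount factor set in the next section).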
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99
e = 0.1
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
| [
"numpy.identity",
"tensorflow.reset_default_graph",
"numpy.random.rand",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"numpy.max",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"matplotlib.pyplot.figure",
"tensorflow.random_uniform",
"tensorflow.matmul",
"tensorflow.square",
"gym.make",
"matplotlib.pyplot.show"
] | [((251, 276), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {}), "('FrozenLake-v0')\n", (259, 276), False, 'import gym\n'), ((310, 334), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (332, 334), True, 'import tensorflow as tf\n'), ((395, 463), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, env.observation_space.n]', 'dtype': 'tf.float32'}), '(shape=[1, env.observation_space.n], dtype=tf.float32)\n', (409, 463), True, 'import tensorflow as tf\n'), ((895, 915), 'tensorflow.matmul', 'tf.matmul', (['input1', 'W'], {}), '(input1, W)\n', (904, 915), True, 'import tensorflow as tf\n'), ((954, 977), 'tensorflow.argmax', 'tf.argmax', (['Qout'], {'axis': '(1)'}), '(Qout, axis=1)\n', (963, 977), True, 'import tensorflow as tf\n'), ((1034, 1097), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, env.action_space.n]', 'dtype': 'tf.float32'}), '(shape=[1, env.action_space.n], dtype=tf.float32)\n', (1048, 1097), True, 'import tensorflow as tf\n'), ((1170, 1222), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (1203, 1222), True, 'import tensorflow as tf\n'), ((1288, 1321), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1319, 1321), True, 'import tensorflow as tf\n'), ((3132, 3144), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3142, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3189), 'matplotlib.pyplot.plot', 'plt.plot', (['rList'], {'label': '"""Return - Q Learning"""'}), "(rList, label='Return - Q Learning')\n", (3153, 3189), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3198, 3200), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3212, 3214), True, 'import matplotlib.pyplot as plt\n'), ((3215, 3258), 'matplotlib.pyplot.plot', 'plt.plot', (['jList'], {'label': '"""Steps - Q Learning"""'}), "(jList, label='Steps - Q Learning')\n", (3223, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3269), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3267, 3269), True, 'import matplotlib.pyplot as plt\n'), ((703, 776), 'tensorflow.random_uniform', 'tf.random_uniform', (['[env.observation_space.n, env.action_space.n]', '(0)', '(0.01)'], {}), '([env.observation_space.n, env.action_space.n], 0, 0.01)\n', (720, 776), True, 'import tensorflow as tf\n'), ((1135, 1158), 'tensorflow.square', 'tf.square', (['(Qout - nextQ)'], {}), '(Qout - nextQ)\n', (1144, 1158), True, 'import tensorflow as tf\n'), ((1469, 1481), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1479, 1481), True, 'import tensorflow as tf\n'), ((2668, 2678), 'numpy.max', 'np.max', (['Q1'], {}), '(Q1)\n', (2674, 2678), True, 'import numpy as np\n'), ((2347, 2364), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2361, 2364), True, 'import numpy as np\n'), ((2285, 2321), 'numpy.identity', 'np.identity', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (2296, 2321), True, 'import numpy as np\n'), ((2600, 2636), 'numpy.identity', 'np.identity', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (2611, 2636), True, 'import numpy as np\n'), ((2886, 2922), 'numpy.identity', 'np.identity', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (2897, 2922), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
# Placeholder for import for type annotations
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of all KeyTemplate Names that must be supported.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': ['AES128_EAX', 'AES256_EAX'],
'AesGcmKey': ['AES128_GCM', 'AES256_GCM'],
'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'],
'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_4KB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256'
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521',
'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363',
'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'],
'HkdfPrfKey': ['<KEY>'],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_PRF_SHA256':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_PRF_SHA512':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_PRF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
}
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]]
for name, template in KEY_TEMPLATE.items()
}
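# Example lookup: SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME['AES128_GCM'] resolves to
# ['cc', 'java', 'go', 'python'], since that template's type_url ends in
# 'AesGcmKey' and SUPPORTED_LANGUAGES maps AesGcmKey to those four languages.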
| [
"tink.proto.tink_pb2.KeyTemplate"
] | [((5320, 5454), 'tink.proto.tink_pb2.KeyTemplate', 'tink_pb2.KeyTemplate', ([], {'type_url': "('type.googleapis.com/google.crypto.tink.' + 'ChaCha20Poly1305Key')", 'output_prefix_type': 'tink_pb2.TINK'}), "(type_url='type.googleapis.com/google.crypto.tink.' +\n 'ChaCha20Poly1305Key', output_prefix_type=tink_pb2.TINK)\n", (5340, 5454), False, 'from tink.proto import tink_pb2\n')] |
#!/usr/bin/env python3
#########################################################
# Written by <NAME>, <EMAIL>
# Copyright (c) 2018 Blockscale LLC
# released under the MIT license
#########################################################
from flask import Flask, request, Response, json, jsonify
from src.remote_signer import RemoteSigner
from os import path
import logging
logging.basicConfig(filename='./remote-signer.log', format='%(asctime)s %(message)s', level=logging.INFO)
app = Flask(__name__)
# sample config used for testing
config = {
'hsm_username': 'resigner',
'hsm_slot': 1,
'hsm_lib': '/opt/cloudhsm/lib/libcloudhsm_pkcs11.so',
'node_addr': 'http://node.internal:8732',
'keys': {
'<KEY>': {
'public_key': '<KEY>',
'private_handle': 7,
'public_handle': 9
}
}
}
logging.info('Opening keys.json')
if path.isfile('keys.json'):
logging.info('Found keys.json')
with open('keys.json', 'r') as myfile:
json_blob = myfile.read().replace('\n', '')
logging.info('Parsed keys.json successfully as JSON')
config = json.loads(json_blob)
logging.info('Config contains: {}'.format(json.dumps(config, indent=2)))
@app.route('/keys/<key_hash>', methods=['POST'])
def sign(key_hash):
response = None
try:
data = request.get_json(force=True)
if key_hash in config['keys']:
logging.info('Found key_hash {} in config'.format(key_hash))
key = config['keys'][key_hash]
logging.info('Attempting to sign {}'.format(data))
rs = RemoteSigner(config, data)
response = jsonify({
'signature': rs.sign(key['private_handle'])
})
logging.info('Response is {}'.format(response))
else:
logging.warning("Couldn't find key {}".format(key_hash))
response = Response('Key not found', status=404)
except Exception as e:
data = {'error': str(e)}
logging.error('Exception thrown during request: {}'.format(str(e)))
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
logging.info('Returning flask response {}'.format(response))
return response
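# Example request shape (hypothetical values): POST /keys/<key_hash> with the JSON
# payload to sign returns {"signature": "..."}; a key hash missing from the config
# yields a 404, and unexpected errors come back as a JSON error body with status 500.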
@app.route('/keys/<key_hash>', methods=['GET'])
def get_public_key(key_hash):
response = None
try:
if key_hash in config['keys']:
key = config['keys'][key_hash]
response = jsonify({
'public_key': key['public_key']
})
logging.info('Found public key {} for key hash {}'.format(key['public_key'], key_hash))
else:
logging.warning("Couldn't public key for key hash {}".format(key_hash))
response = Response('Key not found', status=404)
except Exception as e:
data = {'error': str(e)}
logging.error('Exception thrown during request: {}'.format(str(e)))
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
logging.info('Returning flask response {}'.format(response))
return response
@app.route('/authorized_keys', methods=['GET'])
def authorized_keys():
return app.response_class(
response=json.dumps({}),
status=200,
mimetype='application/json'
)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True)
| [
"logging.basicConfig",
"flask.Flask",
"flask.json.dumps",
"os.path.isfile",
"flask.request.get_json",
"flask.json.loads",
"flask.Response",
"src.remote_signer.RemoteSigner",
"logging.info",
"flask.jsonify"
] | [((376, 486), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./remote-signer.log"""', 'format': '"""%(asctime)s %(message)s"""', 'level': 'logging.INFO'}), "(filename='./remote-signer.log', format=\n '%(asctime)s %(message)s', level=logging.INFO)\n", (395, 486), False, 'import logging\n'), ((489, 504), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (494, 504), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((856, 889), 'logging.info', 'logging.info', (['"""Opening keys.json"""'], {}), "('Opening keys.json')\n", (868, 889), False, 'import logging\n'), ((893, 917), 'os.path.isfile', 'path.isfile', (['"""keys.json"""'], {}), "('keys.json')\n", (904, 917), False, 'from os import path\n'), ((923, 954), 'logging.info', 'logging.info', (['"""Found keys.json"""'], {}), "('Found keys.json')\n", (935, 954), False, 'import logging\n'), ((1058, 1111), 'logging.info', 'logging.info', (['"""Parsed keys.json successfully as JSON"""'], {}), "('Parsed keys.json successfully as JSON')\n", (1070, 1111), False, 'import logging\n'), ((1129, 1150), 'flask.json.loads', 'json.loads', (['json_blob'], {}), '(json_blob)\n', (1139, 1150), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((1347, 1375), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (1363, 1375), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((1611, 1637), 'src.remote_signer.RemoteSigner', 'RemoteSigner', (['config', 'data'], {}), '(config, data)\n', (1623, 1637), False, 'from src.remote_signer import RemoteSigner\n'), ((1912, 1949), 'flask.Response', 'Response', (['"""Key not found"""'], {'status': '(404)'}), "('Key not found', status=404)\n", (1920, 1949), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((2537, 2579), 'flask.jsonify', 'jsonify', (["{'public_key': key['public_key']}"], {}), "({'public_key': key['public_key']})\n", (2544, 2579), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((2831, 2868), 'flask.Response', 'Response', (['"""Key not found"""'], {'status': '(404)'}), "('Key not found', status=404)\n", (2839, 2868), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((3363, 3377), 'flask.json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (3373, 3377), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((1201, 1229), 'flask.json.dumps', 'json.dumps', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (1211, 1229), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((2146, 2162), 'flask.json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2156, 2162), False, 'from flask import Flask, request, Response, json, jsonify\n'), ((3065, 3081), 'flask.json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3075, 3081), False, 'from flask import Flask, request, Response, json, jsonify\n')] |
from palm_tree import db
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.Integer)
response = db.Column(db.Text)
datetime = db.Column(db.DateTime)
def __init__(self, uuid, response, datetime):
self.uuid = uuid
self.response = response
self.datetime = datetime
def __repr__(self):
return '<Data %r>' % self.response
#
# class Logs(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# uuid = db.Column(db.Integer)
# payload = db.Column(db.Text)
# datetime = db.Column(db.DateTime)
#
# def __init__(self, uuid, payload, datetime):
# self.uuid = uuid
# self.payload = payload
# self.datetime = datetime
#
# def __repr__(self):
# return '<Data %r>' % self.payload
| [
"palm_tree.db.Column"
] | [((58, 97), 'palm_tree.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (67, 97), False, 'from palm_tree import db\n'), ((109, 130), 'palm_tree.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (118, 130), False, 'from palm_tree import db\n'), ((146, 164), 'palm_tree.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (155, 164), False, 'from palm_tree import db\n'), ((180, 202), 'palm_tree.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (189, 202), False, 'from palm_tree import db\n')] |
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
import subprocess
import sys
from scapy import all as scapy
DHCP_OFFER_COLUMNS = ('iface', 'mac', 'server_ip', 'server_id', 'gateway',
'dport', 'message', 'yiaddr')
def command_util(*command):
"""object with stderr and stdout
"""
return subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _check_vconfig():
"""Check vconfig installed or not
"""
return not command_util('which', 'vconfig').stderr.read()
def _iface_state(iface):
"""For a given iface return it's state
returns UP, DOWN, UNKNOWN
"""
state = command_util('ip', 'link', 'show', iface).stdout.read()
search_result = re.search(r'.*<(?P<state>.*)>.*', state)
if search_result:
state_list = search_result.groupdict().get('state', [])
if 'UP' in state_list:
return 'UP'
else:
return 'DOWN'
return 'UNKNOWN'
def check_network_up(iface):
return _iface_state(iface) == 'UP'
def check_iface_exist(iface):
"""Check provided interface exists
"""
return not command_util("ip", "link", "show", iface).stderr.read()
def filtered_ifaces(ifaces):
for iface in ifaces:
if not check_iface_exist(iface):
sys.stderr.write('Iface {0} does not exist.'.format(iface))
else:
if not check_network_up(iface):
sys.stderr.write('Network for iface {0} is down.'.format(
iface))
else:
yield iface
def pick_ip(range_start, range_end):
"""Given start_range, end_range generate list of ips
>>> next(pick_ip('192.168.1.10','192.168.1.13'))
'192.168.1.10'
"""
split_address = lambda ip_address: \
[int(item) for item in ip_address.split('.')]
range_start = split_address(range_start)
range_end = split_address(range_end)
i = 0
# ipv4 subnet cant be longer that 4 items
while i < 4:
# 255 - end of subnet
if not range_start[i] == range_end[i] and range_start[i] < 255:
yield '.'.join([str(item) for item in range_start])
range_start[i] += 1
else:
i += 1
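# Illustrative behaviour of pick_ip (addresses are made up, not from the original
# module): octets are compared left to right, the first differing one is incremented,
# and the end address itself is never yielded.
# >>> list(pick_ip('10.0.0.1', '10.0.0.4'))
# ['10.0.0.1', '10.0.0.2', '10.0.0.3']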
def get_item_properties(item, columns):
"""Get specified in columns properties, with preserved order.
Required for correct cli table generation
:param item: dict
:param columns: list with arbitrary keys
"""
properties = []
for key in columns:
properties.append(item.get(key, ''))
return properties
def format_options(options):
"""Util for serializing dhcp options
@options = [1,2,3]
>>> format_options([1, 2, 3])
'\x01\x02\x03'
"""
return "".join((chr(item) for item in options))
def _dhcp_options(dhcp_options):
"""Dhcp options returned by scapy is not in usable format
[('message-type', 2), ('server_id', '192.168.0.5'),
('name_server', '192.168.0.1', '192.168.0.2'), 'end']
"""
for option in dhcp_options:
if isinstance(option, (tuple, list)):
header = option[0]
if len(option[1:]) > 1:
yield (header, option)
else:
yield (header, option[1])
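# Sketch of what _dhcp_options yields for the docstring example above (my reading of
# the code, not captured output): single-value options map to their value, multi-value
# options map to the whole tuple, and the bare 'end' marker is dropped.
# dict(_dhcp_options([('message-type', 2), ('server_id', '192.168.0.5'),
#                     ('name_server', '192.168.0.1', '192.168.0.2'), 'end']))
# -> {'message-type': 2, 'server_id': '192.168.0.5',
#     'name_server': ('name_server', '192.168.0.1', '192.168.0.2')}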
def format_answer(ans, iface):
dhcp_options = dict(_dhcp_options(ans[scapy.DHCP].options))
results = (
iface, ans[scapy.Ether].src, ans[scapy.IP].src,
dhcp_options['server_id'], ans[scapy.BOOTP].giaddr,
ans[scapy.UDP].sport,
scapy.DHCPTypes[dhcp_options['message-type']],
ans[scapy.BOOTP].yiaddr)
return dict(zip(DHCP_OFFER_COLUMNS, results))
def single_format(func):
"""Manage format of dhcp response
"""
@functools.wraps(func)
def formatter(*args, **kwargs):
iface = args[0]
ans = func(*args, **kwargs)
#scapy stores all sequence of requests
#so ans[0][1] would be response to first request
return [format_answer(response[1], iface) for response in ans]
return formatter
def multiproc_map(func):
# multiproc map could not work with format *args
@functools.wraps(func)
def workaround(*args, **kwargs):
args = args[0] if isinstance(args[0], (tuple, list)) else args
return func(*args, **kwargs)
return workaround
def filter_duplicated_results(func):
# due to network infra on broadcast multiple duplicated results
# returned. This helper filter them out
@functools.wraps(func)
def wrapper(*args, **kwargs):
resp = func(*args, **kwargs)
return (dict(t) for t in set([tuple(d.items()) for d in resp]))
return wrapper
class VlansContext(object):
"""Contains all logic to manage vlans
"""
def __init__(self, config):
"""Initialize VlansContext
@config - list or tuple of (iface, vlan) pairs
"""
self.config = config
def __enter__(self):
for iface, vlans in self.config.iteritems():
vifaces = []
for vlan in vlans:
if vlan > 0:
vifaces.append('{0}.{1}'.format(iface, vlan))
yield str(iface), vifaces
def __exit__(self, type, value, trace):
pass
class IfaceState(object):
"""Context manager to control state of iface when dhcp checker is running
"""
def __init__(self, iface, rollback=True, retry=3):
self.rollback = rollback
self.retry = retry
self.iface = iface
self.pre_iface_state = _iface_state(iface)
self.iface_state = self.pre_iface_state
self.post_iface_state = ''
def iface_up(self):
while self.retry and self.iface_state != 'UP':
command_util('ifconfig', self.iface, 'up')
self.iface_state = _iface_state(self.iface)
self.retry -= 1
if self.iface_state != 'UP':
raise EnvironmentError(
'Tried my best to ifup iface {0}.'.format(self.iface))
def __enter__(self):
self.iface_up()
return self.iface
def __exit__(self, exc_type, exc_val, exc_tb):
if self.pre_iface_state != 'UP' and self.rollback:
command_util('ifconfig', self.iface, 'down')
self.post_iface_state = _iface_state(self.iface)
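# Minimal usage sketch for the two context managers above (interface and vlan values
# are hypothetical, not part of the original module):
# with IfaceState('eth0', rollback=True) as iface:
#     ...  # 'eth0' is guaranteed UP here, otherwise EnvironmentError was raised
# with VlansContext({'eth0': [100, 101]}) as pairs:  # __enter__ yields, so iterate it
#     for iface, vifaces in pairs:
#         ...  # vifaces == ['eth0.100', 'eth0.101']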
| [
"subprocess.Popen",
"functools.wraps",
"re.search"
] | [((912, 985), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (928, 985), False, 'import subprocess\n'), ((1342, 1381), 're.search', 're.search', (['""".*<(?P<state>.*)>.*"""', 'state'], {}), "('.*<(?P<state>.*)>.*', state)\n", (1351, 1381), False, 'import re\n'), ((4332, 4353), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (4347, 4353), False, 'import functools\n'), ((4731, 4752), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (4746, 4752), False, 'import functools\n'), ((5076, 5097), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (5091, 5097), False, 'import functools\n')] |
from ma import ma
from models.johnson_scanner_data import JohnsonScannerDataModel
from schemas.brand import BrandSchema
from schemas.category import CategorySchema
from schemas.facts_in_data import FactsInDataSchema
from schemas.market import MarketSchema
from schemas.period import PeriodSchema
class JohnsonScannerDataSchema(ma.SQLAlchemySchema):
market = ma.Nested(MarketSchema)
brand = ma.Nested(BrandSchema)
category = ma.Nested(CategorySchema)
period = ma.Nested(PeriodSchema)
facts = ma.Nested(FactsInDataSchema, many=True)
class Meta:
model = JohnsonScannerDataModel
dump_only = ("id",)
# include_fk = False
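# Usage sketch (names assumed, not from the original file): an instance of this schema
# serializes a JohnsonScannerDataModel row together with its nested
# market/brand/category/period and the list of facts,
# e.g. payload = JohnsonScannerDataSchema().dump(row)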
| [
"ma.ma.Nested"
] | [((364, 387), 'ma.ma.Nested', 'ma.Nested', (['MarketSchema'], {}), '(MarketSchema)\n', (373, 387), False, 'from ma import ma\n'), ((400, 422), 'ma.ma.Nested', 'ma.Nested', (['BrandSchema'], {}), '(BrandSchema)\n', (409, 422), False, 'from ma import ma\n'), ((438, 463), 'ma.ma.Nested', 'ma.Nested', (['CategorySchema'], {}), '(CategorySchema)\n', (447, 463), False, 'from ma import ma\n'), ((477, 500), 'ma.ma.Nested', 'ma.Nested', (['PeriodSchema'], {}), '(PeriodSchema)\n', (486, 500), False, 'from ma import ma\n'), ((513, 552), 'ma.ma.Nested', 'ma.Nested', (['FactsInDataSchema'], {'many': '(True)'}), '(FactsInDataSchema, many=True)\n', (522, 552), False, 'from ma import ma\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
################################################################################
# CSE 253: Programming Assignment 3
# Winter 2019
# Code author: <NAME> (+ modifications by <NAME>)
#
# Filename: baseline_cnn.py
#
# Description:
#
# This file contains the starter code for the baseline architecture you will use
# to get a little practice with PyTorch and compare the results of with your
# improved architecture.
#
# Be sure to fill in the code in the areas marked #TODO.
################################################################################
# PyTorch and neural network imports
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as func
import torch.nn.init as torch_init
import torch.optim as optim
# Data utils and dataloader
import torchvision
from torchvision import transforms, utils
from xray_dataloader_zscored import ChestXrayDataset, create_split_loaders
import matplotlib.pyplot as plt
import numpy as np
import os
class Arch2CNN(nn.Module):
"""
<<<<<<< HEAD
conv1 -> maxpool -> conv2 -> maxpool -> conv3 -> conv4 ->maxpool -> conv5 -> conv6 -> maxpool -> conv7 -> conv8 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
=======
conv1 -> conv2 -> maxpool -> conv3 -> conv4 -> conv5 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
>>>>>>> 6652e3cfb72835ac4a7c802c9a703b59d5f63ae6
"""
def __init__(self):
super(Arch2CNN, self).__init__()
# conv1: 1 input channel, 4 output channels, [3x3] kernel size
self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3)
# Add batch-normalization to the outputs of conv1
self.conv1_normed = nn.BatchNorm2d(4)
# Initialized weights using the Xavier-Normal method
torch_init.xavier_normal_(self.conv1.weight)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Fill in the remaining initializations replacing each '_' with
# the necessary value based on the provided specs for each layer
#TODO: conv2: 4 input channels, 8 output channels, [3x3] kernel, initialization: xavier
self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3)
self.conv2_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv2.weight)
#Maxpool
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=1)
        # conv3: 8 input channels, 16 output channels, [3x3] kernel, initialization: xavier
self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
self.conv3_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv3.weight)
        # conv4: 16 input channels, 16 output channels, [3x3] kernel, initialization: xavier
self.conv4 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3)
self.conv4_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv4.weight)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=1)
        # conv5: 16 input channels, 8 output channels, [3x3] kernel, initialization: xavier
self.conv5 = nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3)
self.conv5_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv5.weight)
self.conv6 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv6_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv6.weight)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1)
        # conv7, conv8: 8 -> 8 channels, [3x3] kernels, followed by a [4x4] stride-4 max-pool
self.conv7 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv7_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv7.weight)
self.conv8 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv8_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv8.weight)
self.pool5 = nn.MaxPool2d(kernel_size=4, stride=4)
# Define 2 fully connected layers:
#TODO: fc1
self.fc1 = nn.Linear(in_features=122*122*8, out_features=512)
self.fc1_normed = nn.BatchNorm1d(512)
torch_init.xavier_normal_(self.fc1.weight)
#TODO: fc2
self.fc2 = nn.Linear(in_features=512, out_features=128)
self.fc2_normed = nn.BatchNorm1d(128)
torch_init.xavier_normal_(self.fc2.weight)
#TODO: fc3
self.fc3 = nn.Linear(in_features=128, out_features=14)
torch_init.xavier_normal_(self.fc3.weight)
#TODO: Output layer: what should out_features be?
self.out_features = 14
def forward(self, batch):
"""Pass the batch of images through each layer of the network, applying
non-linearities after each layer.
Note that this function *needs* to be called "forward" for PyTorch to
automagically perform the forward pass.
Params:
-------
- batch: (Tensor) An input batch of images
Returns:
--------
- logits: (Variable) The output of the network
"""
# Apply first convolution, followed by ReLU non-linearity;
# use batch-normalization on its outputs
batch = func.rrelu(self.conv1_normed(self.conv1(batch)))
batch = self.pool1(batch)
# Apply conv2 and conv3 similarly
batch = func.rrelu(self.conv2_normed(self.conv2(batch)))
batch = self.pool2(batch)
batch = func.rrelu(self.conv3_normed(self.conv3(batch)))
batch = func.rrelu(self.conv4_normed(self.conv4(batch)))
batch = self.pool3(batch)
batch = func.rrelu(self.conv5_normed(self.conv5(batch)))
batch = func.rrelu(self.conv6_normed(self.conv6(batch)))
        # Pass the output of conv6 to the pooling layer
batch = self.pool4(batch)
batch = func.rrelu(self.conv7_normed(self.conv7(batch)))
batch = func.rrelu(self.conv8_normed(self.conv8(batch)))
        # Pass the output of conv8 to the final pooling layer
        batch = self.pool5(batch)
        # Flatten the pooled feature maps to pass to the fully-connected layers
        batch = batch.view(-1, self.num_flat_features(batch))
        # Connect the flattened features to fc1 and then fc2
batch = func.rrelu(self.fc1_normed(self.fc1(batch)))
batch = func.rrelu(self.fc2_normed(self.fc2(batch)))
        # Connect fc2 to the output layer fc3 - no batch-norm or non-linearity here
batch = self.fc3(batch)
# Return the class predictions
        #TODO: apply an activation function to 'batch'
#batch = func.sigmoid(batch)
return batch
def num_flat_features(self, inputs):
# Get the dimensions of the layers excluding the inputs
size = inputs.size()[1:]
# Track the number of features
num_features = 1
for s in size:
num_features *= s
return num_features
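# Shape-trace sketch (my arithmetic, assuming 512x512 single-channel inputs, which this
# file does not state): each 3x3 conv and each 3x3/stride-1 max-pool trims 2 pixels,
# leaving 488 after conv8, and the final 4x4/stride-4 pool gives (488 - 4) // 4 + 1 = 122,
# so the flattened size is 8 * 122 * 122 -- matching fc1's in_features above.
# model = Arch2CNN()
# logits = model(torch.randn(2, 1, 512, 512))   # expected output shape: (2, 14)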
| [
"torch.nn.BatchNorm2d",
"torch.nn.init.xavier_normal_",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.MaxPool2d",
"torch.nn.Linear"
] | [((1570, 1625), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(4)', 'kernel_size': '(3)'}), '(in_channels=1, out_channels=4, kernel_size=3)\n', (1579, 1625), True, 'import torch.nn as nn\n'), ((1713, 1730), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4)'], {}), '(4)\n', (1727, 1730), True, 'import torch.nn as nn\n'), ((1801, 1845), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv1.weight'], {}), '(self.conv1.weight)\n', (1826, 1845), True, 'import torch.nn.init as torch_init\n'), ((1867, 1904), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)'}), '(kernel_size=3, stride=1)\n', (1879, 1904), True, 'import torch.nn as nn\n'), ((2174, 2229), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(4)', 'out_channels': '(8)', 'kernel_size': '(3)'}), '(in_channels=4, out_channels=8, kernel_size=3)\n', (2183, 2229), True, 'import torch.nn as nn\n'), ((2258, 2275), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], {}), '(8)\n', (2272, 2275), True, 'import torch.nn as nn\n'), ((2284, 2328), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv2.weight'], {}), '(self.conv2.weight)\n', (2309, 2328), True, 'import torch.nn.init as torch_init\n'), ((2367, 2404), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)'}), '(kernel_size=3, stride=1)\n', (2379, 2404), True, 'import torch.nn as nn\n'), ((2525, 2581), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(8)', 'out_channels': '(16)', 'kernel_size': '(3)'}), '(in_channels=8, out_channels=16, kernel_size=3)\n', (2534, 2581), True, 'import torch.nn as nn\n'), ((2610, 2628), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (2624, 2628), True, 'import torch.nn as nn\n'), ((2637, 2681), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv3.weight'], {}), '(self.conv3.weight)\n', (2662, 2681), True, 'import torch.nn.init as torch_init\n'), ((2800, 2857), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(16)', 'out_channels': '(16)', 'kernel_size': '(3)'}), '(in_channels=16, out_channels=16, kernel_size=3)\n', (2809, 2857), True, 'import torch.nn as nn\n'), ((2886, 2904), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (2900, 2904), True, 'import torch.nn as nn\n'), ((2913, 2957), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv4.weight'], {}), '(self.conv4.weight)\n', (2938, 2957), True, 'import torch.nn.init as torch_init\n'), ((2979, 3016), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)'}), '(kernel_size=3, stride=1)\n', (2991, 3016), True, 'import torch.nn as nn\n'), ((3134, 3190), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(16)', 'out_channels': '(8)', 'kernel_size': '(3)'}), '(in_channels=16, out_channels=8, kernel_size=3)\n', (3143, 3190), True, 'import torch.nn as nn\n'), ((3219, 3236), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], {}), '(8)\n', (3233, 3236), True, 'import torch.nn as nn\n'), ((3245, 3289), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv5.weight'], {}), '(self.conv5.weight)\n', (3270, 3289), True, 'import torch.nn.init as torch_init\n'), ((3312, 3367), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(8)', 'out_channels': '(8)', 'kernel_size': '(3)'}), '(in_channels=8, out_channels=8, kernel_size=3)\n', (3321, 3367), True, 'import torch.nn as nn\n'), ((3396, 3413), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], 
{}), '(8)\n', (3410, 3413), True, 'import torch.nn as nn\n'), ((3422, 3466), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv6.weight'], {}), '(self.conv6.weight)\n', (3447, 3466), True, 'import torch.nn.init as torch_init\n'), ((3488, 3525), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)'}), '(kernel_size=3, stride=1)\n', (3500, 3525), True, 'import torch.nn as nn\n'), ((3636, 3691), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(8)', 'out_channels': '(8)', 'kernel_size': '(3)'}), '(in_channels=8, out_channels=8, kernel_size=3)\n', (3645, 3691), True, 'import torch.nn as nn\n'), ((3720, 3737), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], {}), '(8)\n', (3734, 3737), True, 'import torch.nn as nn\n'), ((3746, 3790), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv7.weight'], {}), '(self.conv7.weight)\n', (3771, 3790), True, 'import torch.nn.init as torch_init\n'), ((3813, 3868), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(8)', 'out_channels': '(8)', 'kernel_size': '(3)'}), '(in_channels=8, out_channels=8, kernel_size=3)\n', (3822, 3868), True, 'import torch.nn as nn\n'), ((3897, 3914), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8)'], {}), '(8)\n', (3911, 3914), True, 'import torch.nn as nn\n'), ((3923, 3967), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.conv8.weight'], {}), '(self.conv8.weight)\n', (3948, 3967), True, 'import torch.nn.init as torch_init\n'), ((3989, 4026), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(4)', 'stride': '(4)'}), '(kernel_size=4, stride=4)\n', (4001, 4026), True, 'import torch.nn as nn\n'), ((4110, 4164), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(122 * 122 * 8)', 'out_features': '(512)'}), '(in_features=122 * 122 * 8, out_features=512)\n', (4119, 4164), True, 'import torch.nn as nn\n'), ((4188, 4207), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (4202, 4207), True, 'import torch.nn as nn\n'), ((4216, 4258), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.fc1.weight'], {}), '(self.fc1.weight)\n', (4241, 4258), True, 'import torch.nn.init as torch_init\n'), ((4298, 4342), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(512)', 'out_features': '(128)'}), '(in_features=512, out_features=128)\n', (4307, 4342), True, 'import torch.nn as nn\n'), ((4369, 4388), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4383, 4388), True, 'import torch.nn as nn\n'), ((4397, 4439), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.fc2.weight'], {}), '(self.fc2.weight)\n', (4422, 4439), True, 'import torch.nn.init as torch_init\n'), ((4479, 4522), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(128)', 'out_features': '(14)'}), '(in_features=128, out_features=14)\n', (4488, 4522), True, 'import torch.nn as nn\n'), ((4531, 4573), 'torch.nn.init.xavier_normal_', 'torch_init.xavier_normal_', (['self.fc3.weight'], {}), '(self.fc3.weight)\n', (4556, 4573), True, 'import torch.nn.init as torch_init\n')] |
import json
import urllib.request
import credentials
from datetime import datetime, timedelta
class NewsAPI:
def __init__(self, nyt_api):
self.nyt_access = nyt_api
def get_nyt_last_week_articles(self, topic, today):
delta = timedelta(weeks = 1)
last_week = today - delta
begin_date = last_week.strftime('%Y%m%d')
url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + topic + '&begin_date=' + begin_date + '&sort=best&type_of_material=Article&api-key=' + self.nyt_access
try:
json_url = urllib.request.urlopen(url)
articles = json.loads(json_url.read())
        except Exception:
            raise RuntimeError('Failed to retrieve New York Times data.')
        if articles['status'] == 'OK':
            num_of_articles = len(articles['response']['docs'])
if num_of_articles > 5:
return articles['response']['docs'][0:4], articles['response']['meta']['hits']
else:
return articles['response']['docs'][0:num_of_articles - 1], articles['response']['meta']['hits']
else:
raise RuntimeError('Failed to find any New York Times articles with query.')
api = NewsAPI(credentials.NYT_API)
date_time_obj = datetime.now()
api.get_nyt_last_week_articles('election', date_time_obj) | [
"datetime.datetime.now",
"datetime.timedelta"
] | [((1272, 1286), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1284, 1286), False, 'from datetime import datetime, timedelta\n'), ((251, 269), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(1)'}), '(weeks=1)\n', (260, 269), False, 'from datetime import datetime, timedelta\n')] |
from cadnano.cnproxy import UndoCommand
from cadnano.strand import Strand
from cadnano import getBatch
import cadnano.preferences as prefs
import random
class CreateXoverCommand(UndoCommand):
"""
Creates a Xover from the 3' end of strand5p to the 5' end of strand3p
this needs to
1. preserve the old oligo of strand3p
2. install the crossover
3. apply the strand5p oligo to the strand3p
"""
def __init__(self, part, strand5p, strand5p_idx, strand3p, strand3p_idx, update_oligo=True):
super(CreateXoverCommand, self).__init__("create xover")
self._part = part
self._strand5p = strand5p
self._strand5p_idx = strand5p_idx
self._strand3p = strand3p
self._strand3p_idx = strand3p_idx
self._old_oligo3p = strand3p.oligo()
self._update_oligo = update_oligo
# end def
def redo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
olg5p = strand5p.oligo()
old_olg3p = self._old_oligo3p
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
if self._update_oligo:
# Test for Loopiness
if olg5p == strand3p.oligo():
olg5p.setLoop(True)
else:
# 1. update preserved oligo length
olg5p.incrementLength(old_olg3p.length())
# 2. Remove the old oligo and apply the 5' oligo to the 3' strand
old_olg3p.removeFromPart()
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, olg5p)
# 3. install the Xover
strand5p.setConnection3p(strand3p)
strand3p.setConnection5p(strand5p)
#print('strand5p = %s, connection3p = %s'%(strand5p._name, strand3p._name))
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
# if self._update_oligo and not getBatch():
if self._update_oligo:
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
def undo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
old_olg3p = self._old_oligo3p
olg5p = strand5p.oligo()
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
# 1. uninstall the Xover
strand5p.setConnection3p(None)
strand3p.setConnection5p(None)
if self._update_oligo:
# Test Loopiness
if old_olg3p.isLoop():
old_olg3p.setLoop(False)
else:
# 2. restore the modified oligo length
olg5p.decrementLength(old_olg3p.length())
# 3. apply the old oligo to strand3p
old_olg3p.addToPart(part)
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, old_olg3p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
if self._update_oligo:
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
# end class
class RemoveXoverCommand(UndoCommand):
"""
Removes a Xover from the 3' end of strand5p to the 5' end of strand3p
this needs to
1. preserve the old oligo of strand3p
2. install the crossover
3. update the oligo length
4. apply the new strand3p oligo to the strand3p
"""
def __init__(self, part, strand5p, strand3p):
super(RemoveXoverCommand, self).__init__("remove xover")
self._part = part
self._strand5p = strand5p
self._strand5p_idx = strand5p.idx3Prime()
self._strand3p = strand3p
self._strand3p_idx = strand3p.idx5Prime()
n_o3p = self._new_oligo3p = strand3p.oligo().shallowCopy()
colorList = prefs.STAP_COLORS if strand5p.strandSet().isStaple() \
else prefs.SCAF_COLORS
n_o3p.setColor(random.choice(colorList).name())
n_o3p.setLength(0)
for strand in strand3p.generator3pStrand():
n_o3p.incrementLength(strand.totalLength())
# end def
n_o3p.setStrand5p(strand3p)
self._isLoop = strand3p.oligo().isLoop()
# end def
def redo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
new_olg3p = self._new_oligo3p
olg5p = self._strand5p.oligo()
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
# 1. uninstall the Xover
strand5p.setConnection3p(None)
strand3p.setConnection5p(None)
if self._isLoop:
olg5p.setLoop(False)
olg5p.setStrand5p(strand3p)
else:
# 2. restore the modified oligo length
olg5p.decrementLength(new_olg3p.length())
# 3. apply the old oligo to strand3p
new_olg3p.addToPart(part)
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, new_olg3p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
def undo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
olg5p = strand5p.oligo()
new_olg3p = self._new_oligo3p
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
if self._isLoop:
olg5p.setLoop(True)
# No need to restore whatever the old Oligo._strand5p was
else:
# 1. update preserved oligo length
olg5p.incrementLength(new_olg3p.length())
# 2. Remove the old oligo and apply the 5' oligo to the 3' strand
new_olg3p.removeFromPart()
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, olg5p)
# end else
# 3. install the Xover
strand5p.setConnection3p(strand3p)
strand3p.setConnection5p(strand5p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
# end class | [
"cadnano.strand.Strand.setOligo",
"random.choice"
] | [((6327, 6361), 'cadnano.strand.Strand.setOligo', 'Strand.setOligo', (['strand', 'new_olg3p'], {}), '(strand, new_olg3p)\n', (6342, 6361), False, 'from cadnano.strand import Strand\n'), ((7743, 7773), 'cadnano.strand.Strand.setOligo', 'Strand.setOligo', (['strand', 'olg5p'], {}), '(strand, olg5p)\n', (7758, 7773), False, 'from cadnano.strand import Strand\n'), ((1832, 1862), 'cadnano.strand.Strand.setOligo', 'Strand.setOligo', (['strand', 'olg5p'], {}), '(strand, olg5p)\n', (1847, 1862), False, 'from cadnano.strand import Strand\n'), ((3664, 3698), 'cadnano.strand.Strand.setOligo', 'Strand.setOligo', (['strand', 'old_olg3p'], {}), '(strand, old_olg3p)\n', (3679, 3698), False, 'from cadnano.strand import Strand\n'), ((5046, 5070), 'random.choice', 'random.choice', (['colorList'], {}), '(colorList)\n', (5059, 5070), False, 'import random\n')] |
from w1thermsensor import W1ThermSensor
sensor = W1ThermSensor()
temperature_in_celsius = sensor.get_temperature()
temperature_in_fahrenheit = sensor.get_temperature(W1ThermSensor.DEGREES_F)
temperature_in_all_units = sensor.get_temperatures([W1ThermSensor.DEGREES_C, W1ThermSensor.DEGREES_F, W1ThermSensor.KELVIN])
print("Sensor id:" + sensor.id)
print(temperature_in_celsius)
| [
"w1thermsensor.W1ThermSensor"
] | [((50, 65), 'w1thermsensor.W1ThermSensor', 'W1ThermSensor', ([], {}), '()\n', (63, 65), False, 'from w1thermsensor import W1ThermSensor\n')] |
# Generated by Django 3.1.1 on 2020-09-01 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tables', '0003_exposure_category'),
]
operations = [
migrations.AlterField(
model_name='exposure',
name='location',
field=models.CharField(blank=True, default='', max_length=200),
),
]
| [
"django.db.models.CharField"
] | [((338, 394), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(200)'}), "(blank=True, default='', max_length=200)\n", (354, 394), False, 'from django.db import migrations, models\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=too-many-lines
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags)
from azure.cli.core.util import hash_string
from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri
from azure.cli.command_modules.vm._template_builder import StorageProfile
import azure.cli.core.keys as keys
from ._client_factory import _compute_client_factory
from ._actions import _get_latest_image_version
logger = get_logger(__name__)
def validate_asg_names_or_ids(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_subscription_id
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup',
resource_type=ResourceType.MGMT_NETWORK)
resource_group = namespace.resource_group_name
subscription_id = get_subscription_id(cmd.cli_ctx)
names_or_ids = getattr(namespace, 'application_security_groups')
ids = []
if names_or_ids == [""] or not names_or_ids:
return
for val in names_or_ids:
if not is_valid_resource_id(val):
val = resource_id(
subscription=subscription_id,
resource_group=resource_group,
namespace='Microsoft.Network', type='applicationSecurityGroups',
name=val
)
ids.append(ApplicationSecurityGroup(id=val))
setattr(namespace, 'application_security_groups', ids)
def validate_nsg_name(cmd, namespace):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
vm_id = resource_id(name=namespace.vm_name, resource_group=namespace.resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines',
subscription=get_subscription_id(cmd.cli_ctx))
namespace.network_security_group_name = namespace.network_security_group_name \
or '{}_NSG_{}'.format(namespace.vm_name, hash_string(vm_id, length=8))
def validate_keyvault(cmd, namespace):
namespace.keyvault = _get_resource_id(cmd.cli_ctx, namespace.keyvault, namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
def process_vm_secret_format(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
keyvault_usage = CLIError('usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]')
kv = namespace.keyvault
rg = namespace.resource_group_name
if rg:
if not kv or is_valid_resource_id(kv):
raise keyvault_usage
validate_keyvault(cmd, namespace)
else:
if kv and not is_valid_resource_id(kv):
raise keyvault_usage
def _get_resource_group_from_vault_name(cli_ctx, vault_name):
"""
Fetch resource group from vault name
:param str vault_name: name of the key vault
:return: resource group name or None
:rtype: str
"""
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
for vault in client.list():
id_comps = parse_resource_id(vault.id)
if id_comps['name'] == vault_name:
return id_comps['resource_group']
return None
def _get_resource_id(cli_ctx, val, resource_group, resource_type, resource_namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if is_valid_resource_id(val):
return val
kwargs = {
'name': val,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'subscription': get_subscription_id(cli_ctx)
}
missing_kwargs = {k: v for k, v in kwargs.items() if not v}
return resource_id(**kwargs) if not missing_kwargs else None
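# Illustrative expansion (values invented): _get_resource_id(cli_ctx, 'kv1', 'rg1',
# 'vaults', 'Microsoft.KeyVault') builds
# '/subscriptions/<sub-id>/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/kv1',
# while a value that is already a full resource ID is returned unchanged.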
def _get_nic_id(cli_ctx, val, resource_group):
return _get_resource_id(cli_ctx, val, resource_group,
'networkInterfaces', 'Microsoft.Network')
def validate_vm_nic(cmd, namespace):
namespace.nic = _get_nic_id(cmd.cli_ctx, namespace.nic, namespace.resource_group_name)
def validate_vm_nics(cmd, namespace):
rg = namespace.resource_group_name
nic_ids = []
for n in namespace.nics:
nic_ids.append(_get_nic_id(cmd.cli_ctx, n, rg))
namespace.nics = nic_ids
if hasattr(namespace, 'primary_nic') and namespace.primary_nic:
namespace.primary_nic = _get_nic_id(cmd.cli_ctx, namespace.primary_nic, rg)
def _validate_secrets(secrets, os_type):
"""
Validates a parsed JSON array containing secrets for use in VM Creation
Secrets JSON structure
[{
"sourceVault": { "id": "value" },
"vaultCertificates": [{
"certificateUrl": "value",
"certificateStore": "cert store name (only on windows)"
}]
}]
:param dict secrets: Dict fitting the JSON description above
:param string os_type: the type of OS (linux or windows)
:return: errors if any were found
:rtype: list
"""
is_windows = os_type == 'windows'
errors = []
try:
loaded_secret = [validate_file_or_dict(secret) for secret in secrets]
except Exception as err:
raise CLIError('Error decoding secrets: {0}'.format(err))
for idx_arg, narg_secret in enumerate(loaded_secret):
for idx, secret in enumerate(narg_secret):
if 'sourceVault' not in secret:
errors.append(
'Secret is missing sourceVault key at index {0} in arg {1}'.format(
idx, idx_arg))
if 'sourceVault' in secret and 'id' not in secret['sourceVault']:
errors.append(
'Secret is missing sourceVault.id key at index {0} in arg {1}'.format(
idx, idx_arg))
if 'vaultCertificates' not in secret or not secret['vaultCertificates']:
err = 'Secret is missing vaultCertificates array or it is empty at index {0} in ' \
'arg {1} '
errors.append(err.format(idx, idx_arg))
else:
for jdx, cert in enumerate(secret['vaultCertificates']):
message = 'Secret is missing {0} within vaultCertificates array at secret ' \
'index {1} and vaultCertificate index {2} in arg {3}'
if 'certificateUrl' not in cert:
errors.append(message.format('certificateUrl', idx, jdx, idx_arg))
if is_windows and 'certificateStore' not in cert:
errors.append(message.format('certificateStore', idx, jdx, idx_arg))
if errors:
raise CLIError('\n'.join(errors))
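# Example of a secrets payload that satisfies the checks above (all values invented):
# [{
#     "sourceVault": {"id": "/subscriptions/<sub-id>/resourceGroups/rg1/providers/"
#                           "Microsoft.KeyVault/vaults/kv1"},
#     "vaultCertificates": [{
#         "certificateUrl": "https://kv1.vault.azure.net/secrets/cert1",
#         "certificateStore": "My"   # only checked when os_type == 'windows'
#     }]
# }]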
# region VM Create Validators
def _parse_image_argument(cmd, namespace):
""" Systematically determines what type is supplied for the --image parameter. Updates the
namespace and returns the type for subsequent processing. """
from msrestazure.tools import is_valid_resource_id
from msrestazure.azure_exceptions import CloudError
import re
# 1 - check if a fully-qualified ID (assumes it is an image ID)
if is_valid_resource_id(namespace.image):
return 'image_id'
# 2 - attempt to match an URN pattern
urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image)
if urn_match:
namespace.os_publisher = urn_match.group(1)
namespace.os_offer = urn_match.group(2)
namespace.os_sku = urn_match.group(3)
namespace.os_version = urn_match.group(4)
if not any([namespace.plan_name, namespace.plan_product, namespace.plan_publisher]):
image_plan = _get_image_plan_info_if_exists(cmd, namespace)
if image_plan:
namespace.plan_name = image_plan.name
namespace.plan_product = image_plan.product
namespace.plan_publisher = image_plan.publisher
return 'urn'
# 3 - unmanaged vhd based images?
if urlparse(namespace.image).scheme:
return 'uri'
# 4 - attempt to match an URN alias (most likely)
from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc
images = load_images_from_aliases_doc(cmd.cli_ctx)
matched = next((x for x in images if x['urnAlias'].lower() == namespace.image.lower()), None)
if matched:
namespace.os_publisher = matched['publisher']
namespace.os_offer = matched['offer']
namespace.os_sku = matched['sku']
namespace.os_version = matched['version']
return 'urn'
# 5 - check if an existing managed disk image resource
compute_client = _compute_client_factory(cmd.cli_ctx)
try:
compute_client.images.get(namespace.resource_group_name, namespace.image)
namespace.image = _get_resource_id(cmd.cli_ctx, namespace.image, namespace.resource_group_name,
'images', 'Microsoft.Compute')
return 'image_id'
except CloudError:
err = 'Invalid image "{}". Use a custom image name, id, or pick one from {}'
raise CLIError(err.format(namespace.image, [x['urnAlias'] for x in images]))
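# Rough mapping of --image values to the branches above (examples are illustrative):
#   a full image resource ID                          -> 'image_id'
#   'Canonical:UbuntuServer:18.04-LTS:latest'         -> 'urn'  (publisher:offer:sku:version)
#   'https://account.blob.core.windows.net/v/x.vhd'   -> 'uri'
#   'UbuntuLTS'                                       -> 'urn'  (resolved via the alias doc)
#   name of a managed image in the resource group     -> 'image_id'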
def _get_image_plan_info_if_exists(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
try:
compute_client = _compute_client_factory(cmd.cli_ctx)
if namespace.os_version.lower() == 'latest':
image_version = _get_latest_image_version(cmd.cli_ctx, namespace.location, namespace.os_publisher,
namespace.os_offer, namespace.os_sku)
else:
image_version = namespace.os_version
image = compute_client.virtual_machine_images.get(namespace.location,
namespace.os_publisher,
namespace.os_offer,
namespace.os_sku,
image_version)
# pylint: disable=no-member
return image.plan
except CloudError as ex:
logger.warning("Querying the image of '%s' failed for an error '%s'. Configuring plan settings "
"will be skipped", namespace.image, ex.message)
# pylint: disable=inconsistent-return-statements
def _get_storage_profile_description(profile):
if profile == StorageProfile.SACustomImage:
return 'create unmanaged OS disk created from generalized VHD'
elif profile == StorageProfile.SAPirImage:
return 'create unmanaged OS disk from Azure Marketplace image'
elif profile == StorageProfile.SASpecializedOSDisk:
return 'attach to existing unmanaged OS disk'
elif profile == StorageProfile.ManagedCustomImage:
return 'create managed OS disk from custom image'
elif profile == StorageProfile.ManagedPirImage:
return 'create managed OS disk from Azure Marketplace image'
elif profile == StorageProfile.ManagedSpecializedOSDisk:
return 'attach existing managed OS disk'
def _validate_managed_disk_sku(sku):
allowed_skus = ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS']
if sku and sku.lower() not in [x.lower() for x in allowed_skus]:
raise CLIError("invalid storage SKU '{}': allowed values: '{}'".format(sku, allowed_skus))
def _validate_location(cmd, namespace, zone_info, size_info):
from ._vm_utils import list_sku_info
if not namespace.location:
get_default_location_from_resource_group(cmd, namespace)
if zone_info:
sku_infos = list_sku_info(cmd.cli_ctx, namespace.location)
temp = next((x for x in sku_infos if x.name.lower() == size_info.lower()), None)
# For Stack (compute - 2017-03-30), Resource_sku doesn't implement location_info property
if not hasattr(temp, 'location_info'):
return
if not temp or not [x for x in (temp.location_info or []) if x.zones]:
raise CLIError("{}'s location can't be used to create the VM/VMSS because availablity zone is not yet "
"supported. Please use '--location' to specify a capable one. 'az vm list-skus' can be "
"used to find such locations".format(namespace.resource_group_name))
# pylint: disable=too-many-branches, too-many-statements
def _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=False):
from msrestazure.tools import parse_resource_id
# use minimal parameters to resolve the expected storage profile
if getattr(namespace, 'attach_os_disk', None) and not namespace.image:
if namespace.use_unmanaged_disk:
# STORAGE PROFILE #3
namespace.storage_profile = StorageProfile.SASpecializedOSDisk
else:
# STORAGE PROFILE #6
namespace.storage_profile = StorageProfile.ManagedSpecializedOSDisk
elif namespace.image and not getattr(namespace, 'attach_os_disk', None):
image_type = _parse_image_argument(cmd, namespace)
if image_type == 'uri':
# STORAGE PROFILE #2
namespace.storage_profile = StorageProfile.SACustomImage
elif image_type == 'image_id':
# STORAGE PROFILE #5
namespace.storage_profile = StorageProfile.ManagedCustomImage
elif image_type == 'urn':
if namespace.use_unmanaged_disk:
# STORAGE PROFILE #1
namespace.storage_profile = StorageProfile.SAPirImage
else:
# STORAGE PROFILE #4
namespace.storage_profile = StorageProfile.ManagedPirImage
else:
raise CLIError('Unrecognized image type: {}'.format(image_type))
else:
# did not specify image XOR attach-os-disk
raise CLIError('incorrect usage: --image IMAGE | --attach-os-disk DISK')
    auth_params = ['admin_password', 'admin_username', 'authentication_type',
'generate_ssh_keys', 'ssh_dest_key_path', 'ssh_key_value']
# perform parameter validation for the specific storage profile
# start with the required/forbidden parameters for VM
if namespace.storage_profile == StorageProfile.ManagedPirImage:
required = ['image']
forbidden = ['os_type', 'attach_os_disk', 'storage_account',
'storage_container_name', 'use_unmanaged_disk']
if for_scale_set:
forbidden.append('os_disk_name')
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.ManagedCustomImage:
required = ['image']
forbidden = ['os_type', 'attach_os_disk', 'storage_account',
'storage_container_name', 'use_unmanaged_disk']
if for_scale_set:
forbidden.append('os_disk_name')
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
required = ['os_type', 'attach_os_disk']
forbidden = ['os_disk_name', 'os_caching', 'storage_account',
'storage_container_name', 'use_unmanaged_disk', 'storage_sku'] + auth_params
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.SAPirImage:
required = ['image', 'use_unmanaged_disk']
forbidden = ['os_type', 'attach_os_disk', 'data_disk_sizes_gb']
elif namespace.storage_profile == StorageProfile.SACustomImage:
required = ['image', 'os_type', 'use_unmanaged_disk']
forbidden = ['attach_os_disk', 'data_disk_sizes_gb']
elif namespace.storage_profile == StorageProfile.SASpecializedOSDisk:
required = ['os_type', 'attach_os_disk', 'use_unmanaged_disk']
forbidden = ['os_disk_name', 'os_caching', 'image', 'storage_account',
'storage_container_name', 'data_disk_sizes_gb', 'storage_sku'] + auth_params
else:
raise CLIError('Unrecognized storage profile: {}'.format(namespace.storage_profile))
logger.debug("storage profile '%s'", namespace.storage_profile)
if for_scale_set:
# VMSS lacks some parameters, so scrub these out
props_to_remove = ['attach_os_disk', 'storage_account']
for prop in props_to_remove:
if prop in required:
required.remove(prop)
if prop in forbidden:
forbidden.remove(prop)
# set default storage SKU if not provided and using an image based OS
if not namespace.storage_sku and namespace.storage_profile in [StorageProfile.SAPirImage, StorageProfile.SACustomImage]: # pylint: disable=line-too-long
namespace.storage_sku = 'Standard_LRS' if for_scale_set else 'Premium_LRS'
if namespace.storage_sku == 'UltraSSD_LRS' and namespace.ultra_ssd_enabled is None:
namespace.ultra_ssd_enabled = True
# Now verify that the status of required and forbidden parameters
validate_parameter_set(
namespace, required, forbidden,
description='storage profile: {}:'.format(_get_storage_profile_description(namespace.storage_profile)))
image_data_disks_num = 0
if namespace.storage_profile == StorageProfile.ManagedCustomImage:
# extract additional information from a managed custom image
res = parse_resource_id(namespace.image)
compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
if res['type'].lower() == 'images':
image_info = compute_client.images.get(res['resource_group'], res['name'])
namespace.os_type = image_info.storage_profile.os_disk.os_type.value
image_data_disks_num = len(image_info.storage_profile.data_disks or [])
elif res['type'].lower() == 'galleries':
image_info = compute_client.gallery_images.get(resource_group_name=res['resource_group'],
gallery_name=res['name'],
gallery_image_name=res['child_name_1'])
namespace.os_type = image_info.os_type.value
gallery_image_version = res.get('child_name_2', '')
if gallery_image_version.lower() in ['latest', '']:
image_version_infos = compute_client.gallery_image_versions.list_by_gallery_image(
resource_group_name=res['resource_group'], gallery_name=res['name'],
gallery_image_name=res['child_name_1'])
image_version_infos = [x for x in image_version_infos if not x.publishing_profile.exclude_from_latest]
if not image_version_infos:
raise CLIError('There is no latest image version exists for "{}"'.format(namespace.image))
image_version_info = sorted(image_version_infos, key=lambda x: x.publishing_profile.published_date)[-1]
else:
image_version_info = compute_client.gallery_image_versions.get(
resource_group_name=res['resource_group'], gallery_name=res['name'],
gallery_image_name=res['child_name_1'], gallery_image_version_name=res['child_name_2'])
image_data_disks_num = len(image_version_info.storage_profile.data_disk_images or [])
else:
            raise CLIError('usage error: unrecognized image information "{}"'.format(namespace.image))
# pylint: disable=no-member
elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
# accept disk name or ID
namespace.attach_os_disk = _get_resource_id(
cmd.cli_ctx, namespace.attach_os_disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute')
if getattr(namespace, 'attach_data_disks', None):
if not namespace.use_unmanaged_disk:
namespace.attach_data_disks = [_get_resource_id(cmd.cli_ctx, d, namespace.resource_group_name, 'disks',
'Microsoft.Compute') for d in namespace.attach_data_disks]
if not namespace.os_type:
namespace.os_type = 'windows' if 'windows' in namespace.os_offer.lower() else 'linux'
from ._vm_utils import normalize_disk_info
# attach_data_disks are not exposed yet for VMSS, so use 'getattr' to avoid crash
namespace.disk_info = normalize_disk_info(image_data_disks_num=image_data_disks_num,
data_disk_sizes_gb=namespace.data_disk_sizes_gb,
attach_data_disks=getattr(namespace, 'attach_data_disks', []),
storage_sku=namespace.storage_sku,
os_disk_caching=namespace.os_caching,
data_disk_cachings=namespace.data_caching)
def _validate_vm_create_storage_account(cmd, namespace):
from msrestazure.tools import parse_resource_id
if namespace.storage_account:
storage_id = parse_resource_id(namespace.storage_account)
rg = storage_id.get('resource_group', namespace.resource_group_name)
if check_existence(cmd.cli_ctx, storage_id['name'], rg, 'Microsoft.Storage', 'storageAccounts'):
# 1 - existing storage account specified
namespace.storage_account_type = 'existing'
logger.debug("using specified existing storage account '%s'", storage_id['name'])
else:
# 2 - params for new storage account specified
namespace.storage_account_type = 'new'
logger.debug("specified storage account '%s' not found and will be created", storage_id['name'])
else:
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
storage_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE).storage_accounts
# find storage account in target resource group that matches the VM's location
sku_tier = 'Premium' if 'Premium' in namespace.storage_sku else 'Standard'
account = next(
(a for a in storage_client.list_by_resource_group(namespace.resource_group_name)
if a.sku.tier.value == sku_tier and a.location == namespace.location), None)
if account:
# 3 - nothing specified - find viable storage account in target resource group
namespace.storage_account = account.name
namespace.storage_account_type = 'existing'
logger.debug("suitable existing storage account '%s' will be used", account.name)
else:
# 4 - nothing specified - create a new storage account
namespace.storage_account_type = 'new'
logger.debug('no suitable storage account found. One will be created.')
def _validate_vm_create_availability_set(cmd, namespace):
from msrestazure.tools import parse_resource_id, resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if namespace.availability_set:
as_id = parse_resource_id(namespace.availability_set)
name = as_id['name']
rg = as_id.get('resource_group', namespace.resource_group_name)
if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'availabilitySets'):
raise CLIError("Availability set '{}' does not exist.".format(name))
namespace.availability_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=rg,
namespace='Microsoft.Compute',
type='availabilitySets',
name=name)
logger.debug("adding to specified availability set '%s'", namespace.availability_set)
def _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=False):
from msrestazure.tools import is_valid_resource_id
vnet = namespace.vnet_name
subnet = namespace.subnet
rg = namespace.resource_group_name
location = namespace.location
nics = getattr(namespace, 'nics', None)
if not vnet and not subnet and not nics:
logger.debug('no subnet specified. Attempting to find an existing Vnet and subnet...')
# if nothing specified, try to find an existing vnet and subnet in the target resource group
client = get_network_client(cmd.cli_ctx).virtual_networks
# find VNET in target resource group that matches the VM's location with a matching subnet
for vnet_match in (v for v in client.list(rg) if v.location == location and v.subnets):
# 1 - find a suitable existing vnet/subnet
result = None
if not for_scale_set:
result = next((s for s in vnet_match.subnets if s.name.lower() != 'gatewaysubnet'), None)
else:
def _check_subnet(s):
if s.name.lower() == 'gatewaysubnet':
return False
subnet_mask = s.address_prefix.split('/')[-1]
return _subnet_capacity_check(subnet_mask, namespace.instance_count,
not namespace.disable_overprovision)
result = next((s for s in vnet_match.subnets if _check_subnet(s)), None)
if not result:
continue
namespace.subnet = result.name
namespace.vnet_name = vnet_match.name
namespace.vnet_type = 'existing'
logger.debug("existing vnet '%s' and subnet '%s' found", namespace.vnet_name, namespace.subnet)
return
if subnet:
subnet_is_id = is_valid_resource_id(subnet)
if (subnet_is_id and vnet) or (not subnet_is_id and not vnet):
raise CLIError("incorrect '--subnet' usage: --subnet SUBNET_ID | "
"--subnet SUBNET_NAME --vnet-name VNET_NAME")
subnet_exists = \
check_existence(cmd.cli_ctx, subnet, rg, 'Microsoft.Network', 'subnets', vnet, 'virtualNetworks')
if subnet_is_id and not subnet_exists:
raise CLIError("Subnet '{}' does not exist.".format(subnet))
elif subnet_exists:
# 2 - user specified existing vnet/subnet
namespace.vnet_type = 'existing'
logger.debug("using specified vnet '%s' and subnet '%s'", namespace.vnet_name, namespace.subnet)
return
# 3 - create a new vnet/subnet
namespace.vnet_type = 'new'
logger.debug('no suitable subnet found. One will be created.')
def _subnet_capacity_check(subnet_mask, vmss_instance_count, over_provision):
mask = int(subnet_mask)
# '2' are the reserved broadcasting addresses
# '*1.5' so we have enough leeway for over-provision
factor = 1.5 if over_provision else 1
return ((1 << (32 - mask)) - 2) > int(vmss_instance_count * factor)
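# Worked example: a /24 subnet yields (1 << 8) - 2 = 254 usable addresses, so with the
# 1.5x over-provision head-room used here at most 169 instances pass the check
# (169 * 1.5 = 253.5 < 254, while 170 * 1.5 = 255 does not fit).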
def _validate_vm_vmss_accelerated_networking(cli_ctx, namespace):
if namespace.accelerated_networking is None:
size = getattr(namespace, 'size', None) or getattr(namespace, 'vm_sku', None)
size = size.lower()
# to refresh the list, run 'az vm create --accelerated-networking --size Standard_DS1_v2' and
# get it from the error
aval_sizes = ['Standard_D3_v2', 'Standard_D12_v2', 'Standard_D3_v2_Promo', 'Standard_D12_v2_Promo',
'Standard_DS3_v2', 'Standard_DS12_v2', 'Standard_DS13-4_v2', 'Standard_DS14-4_v2',
'Standard_DS3_v2_Promo', 'Standard_DS12_v2_Promo', 'Standard_DS13-4_v2_Promo',
'Standard_DS14-4_v2_Promo', 'Standard_F4', 'Standard_F4s', 'Standard_D8_v3', 'Standard_D8s_v3',
'Standard_D32-8s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D3_v2_ABC',
'Standard_D12_v2_ABC', 'Standard_F4_ABC', 'Standard_F8s_v2', 'Standard_D4_v2',
'Standard_D13_v2', 'Standard_D4_v2_Promo', 'Standard_D13_v2_Promo', 'Standard_DS4_v2',
'Standard_DS13_v2', 'Standard_DS14-8_v2', 'Standard_DS4_v2_Promo', 'Standard_DS13_v2_Promo',
'Standard_DS14-8_v2_Promo', 'Standard_F8', 'Standard_F8s', 'Standard_M64-16ms',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D32-16s_v3', 'Standard_D64-16s_v3',
'Standard_E16_v3', 'Standard_E16s_v3', 'Standard_E32-16s_v3', 'Standard_D4_v2_ABC',
'Standard_D13_v2_ABC', 'Standard_F8_ABC', 'Standard_F16s_v2', 'Standard_D5_v2',
'Standard_D14_v2', 'Standard_D5_v2_Promo', 'Standard_D14_v2_Promo', 'Standard_DS5_v2',
'Standard_DS14_v2', 'Standard_DS5_v2_Promo', 'Standard_DS14_v2_Promo', 'Standard_F16',
'Standard_F16s', 'Standard_M64-32ms', 'Standard_M128-32ms', 'Standard_D32_v3',
'Standard_D32s_v3', 'Standard_D64-32s_v3', 'Standard_E32_v3', 'Standard_E32s_v3',
'Standard_E32-8s_v3', 'Standard_E32-16_v3', 'Standard_D5_v2_ABC', 'Standard_D14_v2_ABC',
'Standard_F16_ABC', 'Standard_F32s_v2', 'Standard_D15_v2', 'Standard_D15_v2_Promo',
'Standard_D15_v2_Nested', 'Standard_DS15_v2', 'Standard_DS15_v2_Promo',
'Standard_DS15_v2_Nested', 'Standard_D40_v3', 'Standard_D40s_v3', 'Standard_D15_v2_ABC',
'Standard_M64ms', 'Standard_M64s', 'Standard_M128-64ms', 'Standard_D64_v3', 'Standard_D64s_v3',
'Standard_E64_v3', 'Standard_E64s_v3', 'Standard_E64-16s_v3', 'Standard_E64-32s_v3',
'Standard_F64s_v2', 'Standard_F72s_v2', 'Standard_M128s', 'Standard_M128ms', 'Standard_L8s_v2',
'Standard_L16s_v2', 'Standard_L32s_v2', 'Standard_L64s_v2', 'Standard_L96s_v2', 'SQLGL',
'SQLGLCore', 'Standard_D4_v3', 'Standard_D4s_v3', 'Standard_D2_v2', 'Standard_DS2_v2',
'Standard_E4_v3', 'Standard_E4s_v3', 'Standard_F2', 'Standard_F2s', 'Standard_F4s_v2',
'Standard_D11_v2', 'Standard_DS11_v2', 'AZAP_Performance_ComputeV17C']
aval_sizes = [x.lower() for x in aval_sizes]
if size not in aval_sizes:
return
new_4core_sizes = ['Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D3_v2_ABC', 'Standard_DS3_v2',
'Standard_DS3_v2_Promo', 'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D12_v2_ABC',
'Standard_DS12_v2', 'Standard_DS12_v2_Promo', 'Standard_F8s_v2', 'Standard_F4',
'Standard_F4_ABC', 'Standard_F4s', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D8_v3',
'Standard_D8s_v3']
new_4core_sizes = [x.lower() for x in new_4core_sizes]
if size not in new_4core_sizes:
compute_client = _compute_client_factory(cli_ctx)
sizes = compute_client.virtual_machine_sizes.list(namespace.location)
size_info = next((s for s in sizes if s.name.lower() == size), None)
if size_info is None or size_info.number_of_cores < 8:
return
# VMs need to be a supported image in the marketplace
# Ubuntu 16.04, SLES 12 SP3, RHEL 7.4, CentOS 7.4, CoreOS Linux, Debian "Stretch" with backports kernel
# Oracle Linux 7.4, Windows Server 2016, Windows Server 2012R2
publisher, offer, sku = namespace.os_publisher, namespace.os_offer, namespace.os_sku
if not publisher:
return
publisher, offer, sku = publisher.lower(), offer.lower(), sku.lower()
distros = [('canonical', 'UbuntuServer', '^16.04'), ('suse', 'sles', '^12-sp3'), ('redhat', 'rhel', '^7.4'),
('openlogic', 'centos', '^7.4'), ('coreos', 'coreos', None), ('credativ', 'debian', '-backports'),
('oracle', 'oracle-linux', '^7.4'), ('MicrosoftWindowsServer', 'WindowsServer', '^2016'),
('MicrosoftWindowsServer', 'WindowsServer', '^2012-R2')]
import re
for p, o, s in distros:
if p.lower() == publisher and (o is None or o.lower() == offer) and (s is None or re.match(s, sku, re.I)):
namespace.accelerated_networking = True
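# Illustrative summary (added for clarity, not in the original source): the validator
# above only enables accelerated networking automatically when the user did not set the
# flag, the VM size is in the known-capable list (and has 8+ cores unless it is one of
# the newer 4-core SKUs), and the image matches one of the supported distro/Windows
# publisher-offer-sku combinations.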
def _validate_vmss_create_subnet(namespace):
if namespace.vnet_type == 'new':
if namespace.subnet_address_prefix is None:
cidr = namespace.vnet_address_prefix.split('/', 1)[0]
i = 0
for i in range(24, 16, -1):
if _subnet_capacity_check(i, namespace.instance_count, not namespace.disable_overprovision):
break
if i < 16:
err = "instance count '{}' is out of range of 2^16 subnet size'"
raise CLIError(err.format(namespace.instance_count))
namespace.subnet_address_prefix = '{}/{}'.format(cidr, i)
if namespace.app_gateway_type and namespace.app_gateway_subnet_address_prefix is None:
namespace.app_gateway_subnet_address_prefix = _get_next_subnet_addr_suffix(
namespace.vnet_address_prefix, namespace.subnet_address_prefix, 24)
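# Illustrative example (hypothetical numbers, not from the original source): for
# --instance-count 1000 with over-provisioning enabled, the loop above walks masks
# /24 down to /17 and stops at /21, the first mask whose usable range (2046 addresses)
# exceeds 1000 * 1.5 = 1500, so the generated subnet prefix ends in '/21'.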
def _get_next_subnet_addr_suffix(vnet_cidr, subnet_cidr, new_mask):
def _convert_to_int(address, bit_mask_len):
a, b, c, d = [int(x) for x in address.split('.')]
result = '{0:08b}{1:08b}{2:08b}{3:08b}'.format(a, b, c, d)
return int(result[:-bit_mask_len], 2)
error_msg = "usage error: --subnet-address-prefix value should be a subrange of --vnet-address-prefix's"
    # extract the vnet information needed to verify the defaults we come up with
vnet_ip_address, mask = vnet_cidr.split('/')
vnet_bit_mask_len = 32 - int(mask)
vnet_int = _convert_to_int(vnet_ip_address, vnet_bit_mask_len)
subnet_ip_address, mask = subnet_cidr.split('/')
subnet_bit_mask_len = 32 - int(mask)
if vnet_bit_mask_len <= subnet_bit_mask_len:
raise CLIError(error_msg)
candidate_int = _convert_to_int(subnet_ip_address, subnet_bit_mask_len) + 1
if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: # overflows?
candidate_int = candidate_int - 2 # try the other way around
if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int:
raise CLIError(error_msg)
# format back to the cidr
    candidate_str = '{0:32b}'.format(candidate_int << subnet_bit_mask_len)
    return '{0}.{1}.{2}.{3}/{4}'.format(int(candidate_str[0:8], 2), int(candidate_str[8:16], 2),
                                        int(candidate_str[16:24], 2), int(candidate_str[24:32], 2),
                                        new_mask)
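# Illustrative example (hypothetical addresses, not in the original source):
# _get_next_subnet_addr_suffix('10.0.0.0/16', '10.0.0.0/24', 24) picks the address
# block immediately after the given subnet and returns '10.0.1.0/24', which is what
# the VMSS validator uses as the default application gateway subnet prefix.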
def _validate_vm_create_nsg(cmd, namespace):
if namespace.nsg:
if check_existence(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,
'Microsoft.Network', 'networkSecurityGroups'):
namespace.nsg_type = 'existing'
logger.debug("using specified NSG '%s'", namespace.nsg)
else:
namespace.nsg_type = 'new'
logger.debug("specified NSG '%s' not found. It will be created.", namespace.nsg)
elif namespace.nsg == '':
namespace.nsg_type = None
logger.debug('no NSG will be used')
elif namespace.nsg is None:
namespace.nsg_type = 'new'
logger.debug('new NSG will be created')
def _validate_vmss_create_nsg(cmd, namespace):
if namespace.nsg:
namespace.nsg = _get_resource_id(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,
'networkSecurityGroups', 'Microsoft.Network')
def _validate_vm_vmss_create_public_ip(cmd, namespace):
if namespace.public_ip_address:
if check_existence(cmd.cli_ctx, namespace.public_ip_address, namespace.resource_group_name,
'Microsoft.Network', 'publicIPAddresses'):
namespace.public_ip_address_type = 'existing'
logger.debug("using existing specified public IP '%s'", namespace.public_ip_address)
else:
namespace.public_ip_address_type = 'new'
logger.debug("specified public IP '%s' not found. It will be created.", namespace.public_ip_address)
elif namespace.public_ip_address == '':
namespace.public_ip_address_type = None
logger.debug('no public IP address will be used')
elif namespace.public_ip_address is None:
namespace.public_ip_address_type = 'new'
logger.debug('new public IP address will be created')
# Public-IP SKU is only exposed for VM. VMSS has no such needs so far
if getattr(namespace, 'public_ip_sku', None):
from azure.cli.core.profiles import ResourceType
PublicIPAddressSkuName, IPAllocationMethod = cmd.get_models('PublicIPAddressSkuName', 'IPAllocationMethod',
resource_type=ResourceType.MGMT_NETWORK)
if namespace.public_ip_sku == PublicIPAddressSkuName.standard.value:
if not namespace.public_ip_address_allocation:
namespace.public_ip_address_allocation = IPAllocationMethod.static.value
def _validate_vmss_create_public_ip(cmd, namespace):
if namespace.load_balancer_type is None and namespace.app_gateway_type is None:
if namespace.public_ip_address:
raise CLIError('--public-ip-address can only be used when creating a new load '
'balancer or application gateway frontend.')
namespace.public_ip_address = ''
_validate_vm_vmss_create_public_ip(cmd, namespace)
def _validate_vm_create_nics(cmd, namespace):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
nics_value = namespace.nics
nics = []
if not nics_value:
namespace.nic_type = 'new'
logger.debug('new NIC will be created')
return
if not isinstance(nics_value, list):
nics_value = [nics_value]
for n in nics_value:
nics.append({
'id': n if '/' in n else resource_id(name=n,
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='networkInterfaces',
subscription=get_subscription_id(cmd.cli_ctx)),
'properties': {
'primary': nics_value[0] == n
}
})
namespace.nics = nics
namespace.nic_type = 'existing'
namespace.public_ip_address_type = None
logger.debug('existing NIC(s) will be used')
def _validate_vm_vmss_create_auth(namespace):
if namespace.storage_profile in [StorageProfile.ManagedSpecializedOSDisk,
StorageProfile.SASpecializedOSDisk]:
return
namespace.admin_username = _validate_admin_username(namespace.admin_username, namespace.os_type)
if not namespace.os_type:
raise CLIError("Unable to resolve OS type. Specify '--os-type' argument.")
if not namespace.authentication_type:
# apply default auth type (password for Windows, ssh for Linux) by examining the OS type
namespace.authentication_type = 'password' \
if (namespace.os_type.lower() == 'windows' or namespace.admin_password) else 'ssh'
if namespace.os_type.lower() == 'windows' and namespace.authentication_type == 'ssh':
raise CLIError('SSH not supported for Windows VMs.')
# validate proper arguments supplied based on the authentication type
if namespace.authentication_type == 'password':
if namespace.ssh_key_value or namespace.ssh_dest_key_path:
raise ValueError(
"incorrect usage for authentication-type 'password': "
"[--admin-username USERNAME] --admin-password PASSWORD")
from knack.prompting import prompt_pass, NoTTYException
try:
if not namespace.admin_password:
namespace.admin_password = prompt_pass('Admin Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify password in non-interactive mode.')
# validate password
_validate_admin_password(namespace.admin_password,
namespace.os_type)
elif namespace.authentication_type == 'ssh':
if namespace.admin_password:
raise ValueError('Admin password cannot be used with SSH authentication type')
validate_ssh_key(namespace)
if not namespace.ssh_dest_key_path:
namespace.ssh_dest_key_path = \
'/home/{}/.ssh/authorized_keys'.format(namespace.admin_username)
def _validate_admin_username(username, os_type):
import re
if not username:
raise CLIError("admin user name can not be empty")
is_linux = (os_type.lower() == 'linux')
# pylint: disable=line-too-long
pattern = (r'[\\\/"\[\]:|<>+=;,?*@#()!A-Z]+' if is_linux else r'[\\\/"\[\]:|<>+=;,?*@]+')
linux_err = r'admin user name cannot contain upper case character A-Z, special characters \/"[]:|<>+=;,?*@#()! or start with $ or -'
win_err = r'admin user name cannot contain special characters \/"[]:|<>+=;,?*@# or ends with .'
if re.findall(pattern, username):
raise CLIError(linux_err if is_linux else win_err)
if is_linux and re.findall(r'^[$-]+', username):
raise CLIError(linux_err)
if not is_linux and username.endswith('.'):
raise CLIError(win_err)
disallowed_user_names = [
"administrator", "admin", "user", "user1", "test", "user2",
"test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
"admin2", "aspnet", "backup", "console", "guest",
"owner", "root", "server", "sql", "support", "support_388945a0",
"sys", "test2", "test3", "user4", "user5"]
if username.lower() in disallowed_user_names:
raise CLIError("This user name '{}' meets the general requirements, but is specifically disallowed for this image. Please try a different value.".format(username))
return username
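# Illustrative examples (not in the original source): 'azureuser' is accepted,
# 'admin' is rejected because it is on the disallowed list, 'JohnDoe' is rejected
# on Linux because of the upper-case characters, and 'john.' is rejected on
# Windows because it ends with a period.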
def _validate_admin_password(password, os_type):
import re
is_linux = (os_type.lower() == 'linux')
max_length = 72 if is_linux else 123
min_length = 12
if len(password) not in range(min_length, max_length + 1):
raise CLIError('The password length must be between {} and {}'.format(min_length,
max_length))
contains_lower = re.findall('[a-z]+', password)
contains_upper = re.findall('[A-Z]+', password)
contains_digit = re.findall('[0-9]+', password)
contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password)
count = len([x for x in [contains_lower, contains_upper,
contains_digit, contains_special_char] if x])
# pylint: disable=line-too-long
if count < 3:
        raise CLIError('Password must have 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character')
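# Illustrative examples (hypothetical values, not in the original source):
# 'Passw0rd!xyz' (12 characters, with lower case, upper case, a digit and a special
# character) satisfies the 3-of-4 character-class rule above, while 'abcdefghijkl'
# is long enough but contains only lower-case characters and is rejected.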
def validate_ssh_key(namespace):
string_or_file = (namespace.ssh_key_value or
os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not keys.is_valid_ssh_rsa_public_key(content):
if namespace.generate_ssh_keys:
# figure out appropriate file names:
# 'base_name'(with private keys), and 'base_name.pub'(with public keys)
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
"allow SSH access to the VM. If using machines without "
"permanent storage, back up your keys to a safe location.",
private_key_filepath, public_key_filepath)
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
'You can use --generate-ssh-keys to let CLI generate one for you')
namespace.ssh_key_value = content
def _validate_vm_vmss_msi(cmd, namespace, from_set_command=False):
if from_set_command or namespace.assign_identity is not None:
identities = namespace.assign_identity or []
from ._vm_utils import MSI_LOCAL_ID
for i, _ in enumerate(identities):
if identities[i] != MSI_LOCAL_ID:
identities[i] = _get_resource_id(cmd.cli_ctx, identities[i], namespace.resource_group_name,
'userAssignedIdentities', 'Microsoft.ManagedIdentity')
if not namespace.identity_scope and getattr(namespace.identity_role, 'is_default', None) is None:
raise CLIError("usage error: '--role {}' is not applicable as the '--scope' is not provided".format(
namespace.identity_role))
user_assigned_identities = [x for x in identities if x != MSI_LOCAL_ID]
if user_assigned_identities and not cmd.supported_api_version(min_api='2017-12-01'):
raise CLIError('usage error: user assigned identity is only available under profile '
'with minimum Compute API version of 2017-12-01')
if namespace.identity_scope:
if identities and MSI_LOCAL_ID not in identities:
raise CLIError("usage error: '--scope'/'--role' is only applicable when assign system identity")
# keep 'identity_role' for output as logical name is more readable
setattr(namespace, 'identity_role_id', _resolve_role_id(cmd.cli_ctx, namespace.identity_role,
namespace.identity_scope))
elif namespace.identity_scope or getattr(namespace.identity_role, 'is_default', None) is None:
raise CLIError('usage error: --assign-identity [--scope SCOPE] [--role ROLE]')
def _resolve_role_id(cli_ctx, role, scope):
import re
import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_definitions
role_id = None
if re.match(r'/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/',
role, re.I):
role_id = role
else:
try:
uuid.UUID(role)
role_id = '/subscriptions/{}/providers/Microsoft.Authorization/roleDefinitions/{}'.format(
client.config.subscription_id, role)
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick an id from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
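# Illustrative note (not in the original source): the resolver above accepts a role
# in three forms -- a full '/subscriptions/.../roleDefinitions/...' id (used as-is),
# a bare GUID (expanded with the client's subscription id), or a role name such as
# 'Contributor' (looked up against the given scope, failing if zero or more than one
# definition matches).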
def process_vm_create_namespace(cmd, namespace):
validate_tags(namespace)
_validate_location(cmd, namespace, namespace.zone, namespace.size)
validate_asg_names_or_ids(cmd, namespace)
_validate_vm_create_storage_profile(cmd, namespace)
if namespace.storage_profile in [StorageProfile.SACustomImage,
StorageProfile.SAPirImage]:
_validate_vm_create_storage_account(cmd, namespace)
_validate_vm_create_availability_set(cmd, namespace)
_validate_vm_vmss_create_vnet(cmd, namespace)
_validate_vm_create_nsg(cmd, namespace)
_validate_vm_vmss_create_public_ip(cmd, namespace)
_validate_vm_create_nics(cmd, namespace)
_validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace)
_validate_vm_vmss_create_auth(namespace)
if namespace.secrets:
_validate_secrets(namespace.secrets, namespace.os_type)
if namespace.license_type and namespace.os_type.lower() != 'windows':
raise CLIError('usage error: --license-type is only applicable on Windows VM')
_validate_vm_vmss_msi(cmd, namespace)
if namespace.boot_diagnostics_storage:
namespace.boot_diagnostics_storage = get_storage_blob_uri(cmd.cli_ctx, namespace.boot_diagnostics_storage)
# endregion
# region VMSS Create Validators
def _get_default_address_pool(cli_ctx, resource_group, balancer_name, balancer_type):
option_name = '--backend-pool-name'
client = getattr(get_network_client(cli_ctx), balancer_type, None)
if not client:
raise CLIError('unrecognized balancer type: {}'.format(balancer_type))
balancer = client.get(resource_group, balancer_name)
values = [x.name for x in balancer.backend_address_pools]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
elif not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0]
def _validate_vmss_single_placement_group(namespace):
if namespace.platform_fault_domain_count is not None and namespace.zones is None:
raise CLIError('usage error: --platform-fault-domain-count COUNT --zones ZONES')
if namespace.zones or namespace.instance_count > 100:
if namespace.single_placement_group is None:
namespace.single_placement_group = False
elif namespace.single_placement_group:
raise CLIError("usage error: '--single-placement-group' should be turned off for zonal scale-sets or with"
" 100+ instances")
def _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from azure.cli.core.profiles import ResourceType
std_lb_is_available = cmd.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK)
if namespace.load_balancer and namespace.application_gateway:
raise CLIError('incorrect usage: --load-balancer NAME_OR_ID | '
'--application-gateway NAME_OR_ID')
# Resolve the type of balancer (if any) being used
balancer_type = 'None'
if namespace.load_balancer is None and namespace.application_gateway is None:
if std_lb_is_available:
balancer_type = 'loadBalancer'
else: # needed for Stack profile 2017_03_09
balancer_type = 'loadBalancer' if namespace.single_placement_group is not False else 'applicationGateway'
logger.debug("W/o STD LB, defaulting to '%s' under because single placement group is disabled",
balancer_type)
elif namespace.load_balancer:
balancer_type = 'loadBalancer'
elif namespace.application_gateway:
balancer_type = 'applicationGateway'
if balancer_type == 'applicationGateway':
if namespace.application_gateway:
client = get_network_client(cmd.cli_ctx).application_gateways
try:
rg = parse_resource_id(namespace.application_gateway).get(
'resource_group', namespace.resource_group_name)
ag_name = parse_resource_id(namespace.application_gateway)['name']
client.get(rg, ag_name)
namespace.app_gateway_type = 'existing'
namespace.backend_pool_name = namespace.backend_pool_name or \
_get_default_address_pool(cmd.cli_ctx, rg, ag_name, 'application_gateways')
logger.debug("using specified existing application gateway '%s'", namespace.application_gateway)
except CloudError:
namespace.app_gateway_type = 'new'
logger.debug("application gateway '%s' not found. It will be created.", namespace.application_gateway)
elif namespace.application_gateway == '':
namespace.app_gateway_type = None
logger.debug('no application gateway will be used')
elif namespace.application_gateway is None:
namespace.app_gateway_type = 'new'
logger.debug('new application gateway will be created')
# AppGateway frontend
required = []
if namespace.app_gateway_type == 'new':
required.append('app_gateway_sku')
required.append('app_gateway_capacity')
if namespace.vnet_type != 'new':
required.append('app_gateway_subnet_address_prefix')
elif namespace.app_gateway_type == 'existing':
required.append('backend_pool_name')
forbidden = ['nat_pool_name', 'load_balancer', 'health_probe']
validate_parameter_set(namespace, required, forbidden, description='network balancer: application gateway')
elif balancer_type == 'loadBalancer':
# LoadBalancer frontend
required = []
forbidden = ['app_gateway_subnet_address_prefix', 'application_gateway', 'app_gateway_sku',
'app_gateway_capacity']
validate_parameter_set(namespace, required, forbidden, description='network balancer: load balancer')
if namespace.load_balancer:
rg = parse_resource_id(namespace.load_balancer).get('resource_group', namespace.resource_group_name)
lb_name = parse_resource_id(namespace.load_balancer)['name']
lb = get_network_lb(cmd.cli_ctx, namespace.resource_group_name, lb_name)
if lb:
namespace.load_balancer_type = 'existing'
namespace.backend_pool_name = namespace.backend_pool_name or \
_get_default_address_pool(cmd.cli_ctx, rg, lb_name, 'load_balancers')
if not namespace.nat_pool_name:
if len(lb.inbound_nat_pools) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' explicitly.".format( # pylint: disable=line-too-long
'--nat-pool-name', ', '.join([n.name for n in lb.inbound_nat_pools])))
elif not lb.inbound_nat_pools: # Associated scaleset will be missing ssh/rdp, so warn here.
logger.warning("No inbound nat pool was configured on '%s'", namespace.load_balancer)
else:
namespace.nat_pool_name = lb.inbound_nat_pools[0].name
logger.debug("using specified existing load balancer '%s'", namespace.load_balancer)
else:
namespace.load_balancer_type = 'new'
logger.debug("load balancer '%s' not found. It will be created.", namespace.load_balancer)
elif namespace.load_balancer == '':
namespace.load_balancer_type = None
logger.debug('no load balancer will be used')
elif namespace.load_balancer is None:
namespace.load_balancer_type = 'new'
logger.debug('new load balancer will be created')
if namespace.load_balancer_type == 'new' and namespace.single_placement_group is False and std_lb_is_available:
LBSkuName = cmd.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
if namespace.load_balancer_sku is None:
namespace.load_balancer_sku = LBSkuName.standard.value
logger.debug("use Standard sku as single placement group is turned off")
elif namespace.load_balancer_sku == LBSkuName.basic.value:
if namespace.zones:
err = "'Standard' load balancer is required for zonal scale-sets"
elif namespace.instance_count > 100:
err = "'Standard' load balancer is required for scale-sets with 100+ instances"
else:
err = "'Standard' load balancer is required because 'single placement group' is turned off"
raise CLIError('usage error:{}'.format(err))
def get_network_client(cli_ctx):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cli_ctx))
def get_network_lb(cli_ctx, resource_group_name, lb_name):
from msrestazure.azure_exceptions import CloudError
network_client = get_network_client(cli_ctx)
try:
return network_client.load_balancers.get(resource_group_name, lb_name)
except CloudError:
return None
def process_vmss_create_namespace(cmd, namespace):
validate_tags(namespace)
if namespace.vm_sku is None:
from azure.cli.core.cloud import AZURE_US_GOV_CLOUD
if cmd.cli_ctx.cloud.name != AZURE_US_GOV_CLOUD.name:
namespace.vm_sku = 'Standard_DS1_v2'
else:
namespace.vm_sku = 'Standard_D1_v2'
_validate_location(cmd, namespace, namespace.zones, namespace.vm_sku)
validate_asg_names_or_ids(cmd, namespace)
_validate_vm_create_storage_profile(cmd, namespace, for_scale_set=True)
_validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=True)
_validate_vmss_single_placement_group(namespace)
_validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace)
_validate_vmss_create_subnet(namespace)
_validate_vmss_create_public_ip(cmd, namespace)
_validate_vmss_create_nsg(cmd, namespace)
_validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace)
_validate_vm_vmss_create_auth(namespace)
_validate_vm_vmss_msi(cmd, namespace)
if namespace.license_type and namespace.os_type.lower() != 'windows':
raise CLIError('usage error: --license-type is only applicable on Windows VM scaleset')
if not namespace.public_ip_per_vm and namespace.vm_domain_name:
raise CLIError('Usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled')
if namespace.eviction_policy and not namespace.priority:
raise CLIError('Usage error: --priority PRIORITY [--eviction-policy POLICY]')
# endregion
# region disk, snapshot, image validators
def validate_vm_disk(cmd, namespace):
namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk,
namespace.resource_group_name, 'disks', 'Microsoft.Compute')
def validate_vmss_disk(cmd, namespace):
if namespace.disk:
namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk,
namespace.resource_group_name, 'disks', 'Microsoft.Compute')
if bool(namespace.disk) == bool(namespace.size_gb):
raise CLIError('usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB')
elif bool(namespace.disk) != bool(namespace.instance_id):
raise CLIError('usage error: --disk EXIST_DISK --instance-id ID')
def process_disk_or_snapshot_create_namespace(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
validate_tags(namespace)
if namespace.source:
usage_error = 'usage error: --source {SNAPSHOT | DISK} | --source VHD_BLOB_URI [--source-storage-account-id ID]'
try:
namespace.source_blob_uri, namespace.source_disk, namespace.source_snapshot = _figure_out_storage_source(
cmd.cli_ctx, namespace.resource_group_name, namespace.source)
if not namespace.source_blob_uri and namespace.source_storage_account_id:
raise CLIError(usage_error)
except CloudError:
raise CLIError(usage_error)
def process_image_create_namespace(cmd, namespace):
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
validate_tags(namespace)
try:
# try capturing from VM, a most common scenario
res_id = _get_resource_id(cmd.cli_ctx, namespace.source, namespace.resource_group_name,
'virtualMachines', 'Microsoft.Compute')
res = parse_resource_id(res_id)
compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
vm_info = compute_client.virtual_machines.get(res['resource_group'], res['name'])
# pylint: disable=no-member
namespace.os_type = vm_info.storage_profile.os_disk.os_type.value
namespace.source_virtual_machine = res_id
if namespace.data_disk_sources:
raise CLIError("'--data-disk-sources' is not allowed when capturing "
"images from virtual machines")
except CloudError:
namespace.os_blob_uri, namespace.os_disk, namespace.os_snapshot = _figure_out_storage_source(cmd.cli_ctx, namespace.resource_group_name, namespace.source) # pylint: disable=line-too-long
namespace.data_blob_uris = []
namespace.data_disks = []
namespace.data_snapshots = []
if namespace.data_disk_sources:
for data_disk_source in namespace.data_disk_sources:
source_blob_uri, source_disk, source_snapshot = _figure_out_storage_source(
cmd.cli_ctx, namespace.resource_group_name, data_disk_source)
if source_blob_uri:
namespace.data_blob_uris.append(source_blob_uri)
if source_disk:
namespace.data_disks.append(source_disk)
if source_snapshot:
namespace.data_snapshots.append(source_snapshot)
if not namespace.os_type:
raise CLIError("usage error: os type is required to create the image, "
"please specify '--os-type OS_TYPE'")
def _figure_out_storage_source(cli_ctx, resource_group_name, source):
from msrestazure.azure_exceptions import CloudError
source_blob_uri = None
source_disk = None
source_snapshot = None
if urlparse(source).scheme: # a uri?
source_blob_uri = source
elif '/disks/' in source.lower():
source_disk = source
elif '/snapshots/' in source.lower():
source_snapshot = source
else:
compute_client = _compute_client_factory(cli_ctx)
# pylint: disable=no-member
try:
info = compute_client.snapshots.get(resource_group_name, source)
source_snapshot = info.id
except CloudError:
info = compute_client.disks.get(resource_group_name, source)
source_disk = info.id
return (source_blob_uri, source_disk, source_snapshot)
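# Illustrative examples (hypothetical values, not in the original source):
# 'https://acct.blob.core.windows.net/vhds/os.vhd' is treated as a blob URI, an id
# containing '/disks/' as a managed disk, one containing '/snapshots/' as a snapshot,
# and a bare name such as 'my-snapshot' is resolved within the resource group first
# as a snapshot and then, on failure, as a disk.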
def process_disk_encryption_namespace(cmd, namespace):
namespace.disk_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.disk_encryption_keyvault,
namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
if namespace.key_encryption_keyvault:
if not namespace.key_encryption_key:
raise CLIError("Incorrect usage '--key-encryption-keyvault': "
"'--key-encryption-key' is required")
namespace.key_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.key_encryption_keyvault,
namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
def process_assign_identity_namespace(cmd, namespace):
_validate_vm_vmss_msi(cmd, namespace, from_set_command=True)
def process_remove_identity_namespace(cmd, namespace):
if namespace.identities:
from ._vm_utils import MSI_LOCAL_ID
for i in range(len(namespace.identities)):
if namespace.identities[i] != MSI_LOCAL_ID:
namespace.identities[i] = _get_resource_id(cmd.cli_ctx, namespace.identities[i],
namespace.resource_group_name,
'userAssignedIdentities',
'Microsoft.ManagedIdentity')
# TODO move to its own command module https://github.com/Azure/azure-cli/issues/5105
def process_msi_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
validate_tags(namespace)
def process_gallery_image_version_namespace(cmd, namespace):
TargetRegion = cmd.get_models('TargetRegion')
if namespace.target_regions:
regions_info = []
for t in namespace.target_regions:
parts = t.split('=', 1)
if len(parts) == 1:
regions_info.append(TargetRegion(name=parts[0]))
else:
try:
replica_count = int(parts[1])
except ValueError:
raise CLIError("usage error: {}'s replica count must be an integer".format(parts[0]))
regions_info.append(TargetRegion(name=parts[0], regional_replica_count=replica_count))
namespace.target_regions = regions_info
# endregion
| [
"knack.log.get_logger",
"azure.cli.command_modules.vm._vm_utils.check_existence",
"azure.cli.core.commands.validators.validate_file_or_dict",
"urlparse.urlparse",
"azure.cli.command_modules.vm._vm_utils.get_target_network_api",
"os.path.exists",
"azure.cli.core.util.hash_string",
"knack.prompting.prompt_pass",
"azure.cli.command_modules.vm._vm_utils.get_storage_blob_uri",
"msrestazure.tools.resource_id",
"azure.cli.core.commands.client_factory.get_mgmt_service_client",
"msrestazure.tools.parse_resource_id",
"os.path.expanduser",
"azure.cli.core.keys.generate_ssh_keys",
"re.match",
"azure.cli.core.commands.validators.get_default_location_from_resource_group",
"knack.util.CLIError",
"re.findall",
"azure.cli.core.keys.is_valid_ssh_rsa_public_key",
"uuid.UUID",
"azure.cli.command_modules.vm._actions.load_images_from_aliases_doc",
"msrestazure.tools.is_valid_resource_id",
"azure.cli.core.commands.client_factory.get_subscription_id",
"azure.cli.core.commands.validators.validate_parameter_set",
"azure.cli.core.commands.validators.validate_tags"
] | [((1119, 1139), 'knack.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1129, 1139), False, 'from knack.log import get_logger\n'), ((1620, 1652), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', (['cmd.cli_ctx'], {}), '(cmd.cli_ctx)\n', (1639, 1652), False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n'), ((3143, 3228), 'knack.util.CLIError', 'CLIError', (['"""usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]"""'], {}), "('usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]'\n )\n", (3151, 3228), False, 'from knack.util import CLIError\n'), ((4431, 4456), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', (['val'], {}), '(val)\n', (4451, 4456), False, 'from msrestazure.tools import is_valid_resource_id\n'), ((8181, 8218), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', (['namespace.image'], {}), '(namespace.image)\n', (8201, 8218), False, 'from msrestazure.tools import is_valid_resource_id\n'), ((8305, 8365), 're.match', 're.match', (['"""([^:]*):([^:]*):([^:]*):([^:]*)"""', 'namespace.image'], {}), "('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image)\n", (8313, 8365), False, 'import re\n'), ((9225, 9266), 'azure.cli.command_modules.vm._actions.load_images_from_aliases_doc', 'load_images_from_aliases_doc', (['cmd.cli_ctx'], {}), '(cmd.cli_ctx)\n', (9253, 9266), False, 'from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc\n'), ((42574, 42603), 're.findall', 're.findall', (['pattern', 'username'], {}), '(pattern, username)\n', (42584, 42603), False, 'import re\n'), ((43859, 43889), 're.findall', 're.findall', (['"""[a-z]+"""', 'password'], {}), "('[a-z]+', password)\n", (43869, 43889), False, 'import re\n'), ((43911, 43941), 're.findall', 're.findall', (['"""[A-Z]+"""', 'password'], {}), "('[A-Z]+', password)\n", (43921, 43941), False, 'import re\n'), ((43963, 43993), 're.findall', 're.findall', (['"""[0-9]+"""', 'password'], {}), "('[0-9]+', password)\n", (43973, 43993), False, 'import re\n'), ((44022, 44093), 're.findall', 're.findall', (['"""[ `~!@#$%^&*()=+_\\\\[\\\\]{}\\\\|;:.\\\\/\\\\\'\\\\",<>?]+"""', 'password'], {}), '(\'[ `~!@#$%^&*()=+_\\\\[\\\\]{}\\\\|;:.\\\\/\\\\\\\'\\\\",<>?]+\', password)\n', (44032, 44093), False, 'import re\n'), ((44631, 44661), 'os.path.exists', 'os.path.exists', (['string_or_file'], {}), '(string_or_file)\n', (44645, 44661), False, 'import os\n'), ((48120, 48217), 're.match', 're.match', (['"""/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/"""', 'role', 're.I'], {}), "('/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/'\n , role, re.I)\n", (48128, 48217), False, 'import re\n'), ((49084, 49108), 'azure.cli.core.commands.validators.validate_tags', 'validate_tags', (['namespace'], {}), '(namespace)\n', (49097, 49108), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((58711, 58735), 'azure.cli.core.commands.validators.validate_tags', 'validate_tags', (['namespace'], {}), '(namespace)\n', (58724, 58735), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((61092, 61116), 'azure.cli.core.commands.validators.validate_tags', 'validate_tags', (['namespace'], {}), '(namespace)\n', (61105, 61116), False, 'from 
azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((61835, 61859), 'azure.cli.core.commands.validators.validate_tags', 'validate_tags', (['namespace'], {}), '(namespace)\n', (61848, 61859), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((66325, 66381), 'azure.cli.core.commands.validators.get_default_location_from_resource_group', 'get_default_location_from_resource_group', (['cmd', 'namespace'], {}), '(cmd, namespace)\n', (66365, 66381), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((66386, 66410), 'azure.cli.core.commands.validators.validate_tags', 'validate_tags', (['namespace'], {}), '(namespace)\n', (66399, 66410), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((3940, 4000), 'azure.cli.core.commands.client_factory.get_mgmt_service_client', 'get_mgmt_service_client', (['cli_ctx', 'ResourceType.MGMT_KEYVAULT'], {}), '(cli_ctx, ResourceType.MGMT_KEYVAULT)\n', (3963, 4000), False, 'from azure.cli.core.commands.client_factory import get_mgmt_service_client\n'), ((4059, 4086), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['vault.id'], {}), '(vault.id)\n', (4076, 4086), False, 'from msrestazure.tools import parse_resource_id\n'), ((4652, 4680), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', (['cli_ctx'], {}), '(cli_ctx)\n', (4671, 4680), False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n'), ((4763, 4784), 'msrestazure.tools.resource_id', 'resource_id', ([], {}), '(**kwargs)\n', (4774, 4784), False, 'from msrestazure.tools import resource_id\n'), ((9019, 9044), 'urlparse.urlparse', 'urlparse', (['namespace.image'], {}), '(namespace.image)\n', (9027, 9044), False, 'from urlparse import urlparse\n'), ((12583, 12639), 'azure.cli.core.commands.validators.get_default_location_from_resource_group', 'get_default_location_from_resource_group', (['cmd', 'namespace'], {}), '(cmd, namespace)\n', (12623, 12639), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((18462, 18496), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.image'], {}), '(namespace.image)\n', (18479, 18496), False, 'from msrestazure.tools import parse_resource_id\n'), ((22187, 22231), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.storage_account'], {}), '(namespace.storage_account)\n', (22204, 22231), False, 'from msrestazure.tools import parse_resource_id\n'), ((22320, 22416), 'azure.cli.command_modules.vm._vm_utils.check_existence', 'check_existence', (['cmd.cli_ctx', "storage_id['name']", 'rg', '"""Microsoft.Storage"""', '"""storageAccounts"""'], {}), "(cmd.cli_ctx, storage_id['name'], rg, 'Microsoft.Storage',\n 'storageAccounts')\n", (22335, 22416), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((24266, 24311), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.availability_set'], {}), '(namespace.availability_set)\n', (24283, 24311), False, 
'from msrestazure.tools import parse_resource_id\n'), ((26802, 26830), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', (['subnet'], {}), '(subnet)\n', (26822, 26830), False, 'from msrestazure.tools import is_valid_resource_id\n'), ((27093, 27194), 'azure.cli.command_modules.vm._vm_utils.check_existence', 'check_existence', (['cmd.cli_ctx', 'subnet', 'rg', '"""Microsoft.Network"""', '"""subnets"""', 'vnet', '"""virtualNetworks"""'], {}), "(cmd.cli_ctx, subnet, rg, 'Microsoft.Network', 'subnets',\n vnet, 'virtualNetworks')\n", (27108, 27194), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((35113, 35132), 'knack.util.CLIError', 'CLIError', (['error_msg'], {}), '(error_msg)\n', (35121, 35132), False, 'from knack.util import CLIError\n'), ((35939, 36063), 'azure.cli.command_modules.vm._vm_utils.check_existence', 'check_existence', (['cmd.cli_ctx', 'namespace.nsg', 'namespace.resource_group_name', '"""Microsoft.Network"""', '"""networkSecurityGroups"""'], {}), "(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,\n 'Microsoft.Network', 'networkSecurityGroups')\n", (35954, 36063), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((36932, 37067), 'azure.cli.command_modules.vm._vm_utils.check_existence', 'check_existence', (['cmd.cli_ctx', 'namespace.public_ip_address', 'namespace.resource_group_name', '"""Microsoft.Network"""', '"""publicIPAddresses"""'], {}), "(cmd.cli_ctx, namespace.public_ip_address, namespace.\n resource_group_name, 'Microsoft.Network', 'publicIPAddresses')\n", (36947, 37067), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((40286, 40354), 'knack.util.CLIError', 'CLIError', (['"""Unable to resolve OS type. Specify \'--os-type\' argument."""'], {}), '("Unable to resolve OS type. 
Specify \'--os-type\' argument.")\n', (40294, 40354), False, 'from knack.util import CLIError\n'), ((40748, 40794), 'knack.util.CLIError', 'CLIError', (['"""SSH not supported for Windows VMs."""'], {}), "('SSH not supported for Windows VMs.')\n", (40756, 40794), False, 'from knack.util import CLIError\n'), ((42111, 42155), 'knack.util.CLIError', 'CLIError', (['"""admin user name can not be empty"""'], {}), "('admin user name can not be empty')\n", (42119, 42155), False, 'from knack.util import CLIError\n'), ((42619, 42663), 'knack.util.CLIError', 'CLIError', (['(linux_err if is_linux else win_err)'], {}), '(linux_err if is_linux else win_err)\n', (42627, 42663), False, 'from knack.util import CLIError\n'), ((42684, 42714), 're.findall', 're.findall', (['"""^[$-]+"""', 'username'], {}), "('^[$-]+', username)\n", (42694, 42714), False, 'import re\n'), ((42731, 42750), 'knack.util.CLIError', 'CLIError', (['linux_err'], {}), '(linux_err)\n', (42739, 42750), False, 'from knack.util import CLIError\n'), ((42813, 42830), 'knack.util.CLIError', 'CLIError', (['win_err'], {}), '(win_err)\n', (42821, 42830), False, 'from knack.util import CLIError\n'), ((44292, 44437), 'knack.util.CLIError', 'CLIError', (['"""Password must have the 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character"""'], {}), "(\n 'Password must have the 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character'\n )\n", (44300, 44437), False, 'from knack.util import CLIError\n'), ((48010, 48075), 'azure.cli.core.commands.client_factory.get_mgmt_service_client', 'get_mgmt_service_client', (['cli_ctx', 'ResourceType.MGMT_AUTHORIZATION'], {}), '(cli_ctx, ResourceType.MGMT_AUTHORIZATION)\n', (48033, 48075), False, 'from azure.cli.core.commands.client_factory import get_mgmt_service_client\n'), ((50018, 50090), 'knack.util.CLIError', 'CLIError', (['"""usage error: --license-type is only applicable on Windows VM"""'], {}), "('usage error: --license-type is only applicable on Windows VM')\n", (50026, 50090), False, 'from knack.util import CLIError\n'), ((50221, 50290), 'azure.cli.command_modules.vm._vm_utils.get_storage_blob_uri', 'get_storage_blob_uri', (['cmd.cli_ctx', 'namespace.boot_diagnostics_storage'], {}), '(cmd.cli_ctx, namespace.boot_diagnostics_storage)\n', (50241, 50290), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((51278, 51352), 'knack.util.CLIError', 'CLIError', (['"""usage error: --platform-fault-domain-count COUNT --zones ZONES"""'], {}), "('usage error: --platform-fault-domain-count COUNT --zones ZONES')\n", (51286, 51352), False, 'from knack.util import CLIError\n'), ((52160, 52260), 'knack.util.CLIError', 'CLIError', (['"""incorrect usage: --load-balancer NAME_OR_ID | --application-gateway NAME_OR_ID"""'], {}), "(\n 'incorrect usage: --load-balancer NAME_OR_ID | --application-gateway NAME_OR_ID'\n )\n", (52168, 52260), False, 'from knack.util import CLIError\n'), ((54812, 54924), 'azure.cli.core.commands.validators.validate_parameter_set', 'validate_parameter_set', (['namespace', 'required', 'forbidden'], {'description': '"""network balancer: application gateway"""'}), "(namespace, required, forbidden, description=\n 'network balancer: application gateway')\n", (54834, 54924), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((59780, 
59866), 'knack.util.CLIError', 'CLIError', (['"""usage error: --license-type is only applicable on Windows VM scaleset"""'], {}), "(\n 'usage error: --license-type is only applicable on Windows VM scaleset')\n", (59788, 59866), False, 'from knack.util import CLIError\n'), ((59945, 60048), 'knack.util.CLIError', 'CLIError', (['"""Usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled"""'], {}), "(\n 'Usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled'\n )\n", (59953, 60048), False, 'from knack.util import CLIError\n'), ((60115, 60186), 'knack.util.CLIError', 'CLIError', (['"""Usage error: --priority PRIORITY [--eviction-policy POLICY]"""'], {}), "('Usage error: --priority PRIORITY [--eviction-policy POLICY]')\n", (60123, 60186), False, 'from knack.util import CLIError\n'), ((60756, 60830), 'knack.util.CLIError', 'CLIError', (['"""usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB"""'], {}), "('usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB')\n", (60764, 60830), False, 'from knack.util import CLIError\n'), ((62109, 62134), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['res_id'], {}), '(res_id)\n', (62126, 62134), False, 'from msrestazure.tools import parse_resource_id\n'), ((63971, 63987), 'urlparse.urlparse', 'urlparse', (['source'], {}), '(source)\n', (63979, 63987), False, 'from urlparse import urlparse\n'), ((1845, 1870), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', (['val'], {}), '(val)\n', (1865, 1870), False, 'from msrestazure.tools import is_valid_resource_id\n'), ((1890, 2041), 'msrestazure.tools.resource_id', 'resource_id', ([], {'subscription': 'subscription_id', 'resource_group': 'resource_group', 'namespace': '"""Microsoft.Network"""', 'type': '"""applicationSecurityGroups"""', 'name': 'val'}), "(subscription=subscription_id, resource_group=resource_group,\n namespace='Microsoft.Network', type='applicationSecurityGroups', name=val)\n", (1901, 2041), False, 'from msrestazure.tools import resource_id\n'), ((2600, 2632), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', (['cmd.cli_ctx'], {}), '(cmd.cli_ctx)\n', (2619, 2632), False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n'), ((2767, 2795), 'azure.cli.core.util.hash_string', 'hash_string', (['vm_id'], {'length': '(8)'}), '(vm_id, length=8)\n', (2778, 2795), False, 'from azure.cli.core.util import hash_string\n'), ((3324, 3348), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', (['kv'], {}), '(kv)\n', (3344, 3348), False, 'from msrestazure.tools import is_valid_resource_id\n'), ((6126, 6155), 'azure.cli.core.commands.validators.validate_file_or_dict', 'validate_file_or_dict', (['secret'], {}), '(secret)\n', (6147, 6155), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((14932, 14998), 'knack.util.CLIError', 'CLIError', (['"""incorrect usage: --image IMAGE | --attach-os-disk DISK"""'], {}), "('incorrect usage: --image IMAGE | --attach-os-disk DISK')\n", (14940, 14998), False, 'from knack.util import CLIError\n'), ((23025, 23088), 'azure.cli.core.commands.client_factory.get_mgmt_service_client', 'get_mgmt_service_client', (['cmd.cli_ctx', 'ResourceType.MGMT_STORAGE'], {}), '(cmd.cli_ctx, ResourceType.MGMT_STORAGE)\n', (23048, 23088), False, 'from azure.cli.core.commands.client_factory import get_mgmt_service_client\n'), 
((24429, 24508), 'azure.cli.command_modules.vm._vm_utils.check_existence', 'check_existence', (['cmd.cli_ctx', 'name', 'rg', '"""Microsoft.Compute"""', '"""availabilitySets"""'], {}), "(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'availabilitySets')\n", (24444, 24508), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((26920, 27033), 'knack.util.CLIError', 'CLIError', (['"""incorrect \'--subnet\' usage: --subnet SUBNET_ID | --subnet SUBNET_NAME --vnet-name VNET_NAME"""'], {}), '(\n "incorrect \'--subnet\' usage: --subnet SUBNET_ID | --subnet SUBNET_NAME --vnet-name VNET_NAME"\n )\n', (26928, 27033), False, 'from knack.util import CLIError\n'), ((35480, 35499), 'knack.util.CLIError', 'CLIError', (['error_msg'], {}), '(error_msg)\n', (35488, 35499), False, 'from knack.util import CLIError\n'), ((38562, 38687), 'knack.util.CLIError', 'CLIError', (['"""--public-ip-address can only be used when creating a new load balancer or application gateway frontend."""'], {}), "(\n '--public-ip-address can only be used when creating a new load balancer or application gateway frontend.'\n )\n", (38570, 38687), False, 'from knack.util import CLIError\n'), ((44547, 44570), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (44565, 44570), False, 'import os\n'), ((44828, 44869), 'azure.cli.core.keys.is_valid_ssh_rsa_public_key', 'keys.is_valid_ssh_rsa_public_key', (['content'], {}), '(content)\n', (44860, 44869), True, 'import azure.cli.core.keys as keys\n'), ((46954, 47090), 'knack.util.CLIError', 'CLIError', (['"""usage error: user assigned identity is only available under profile with minimum Compute API version of 2017-12-01"""'], {}), "(\n 'usage error: user assigned identity is only available under profile with minimum Compute API version of 2017-12-01'\n )\n", (46962, 47090), False, 'from knack.util import CLIError\n'), ((47716, 47788), 'knack.util.CLIError', 'CLIError', (['"""usage error: --assign-identity [--scope SCOPE] [--role ROLE]"""'], {}), "('usage error: --assign-identity [--scope SCOPE] [--role ROLE]')\n", (47724, 47788), False, 'from knack.util import CLIError\n'), ((48289, 48304), 'uuid.UUID', 'uuid.UUID', (['role'], {}), '(role)\n', (48298, 48304), False, 'import uuid\n'), ((55170, 55276), 'azure.cli.core.commands.validators.validate_parameter_set', 'validate_parameter_set', (['namespace', 'required', 'forbidden'], {'description': '"""network balancer: load balancer"""'}), "(namespace, required, forbidden, description=\n 'network balancer: load balancer')\n", (55192, 55276), False, 'from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags\n'), ((58324, 58355), 'azure.cli.command_modules.vm._vm_utils.get_target_network_api', 'get_target_network_api', (['cli_ctx'], {}), '(cli_ctx)\n', (58346, 58355), False, 'from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri\n'), ((60907, 60966), 'knack.util.CLIError', 'CLIError', (['"""usage error: --disk EXIST_DISK --instance-id ID"""'], {}), "('usage error: --disk EXIST_DISK --instance-id ID')\n", (60915, 60966), False, 'from knack.util import CLIError\n'), ((62542, 62644), 'knack.util.CLIError', 'CLIError', (['"""\'--data-disk-sources\' is not allowed when capturing images from virtual machines"""'], {}), '(\n "\'--data-disk-sources\' is not allowed when capturing images from virtual machines"\n )\n', 
(62550, 62644), False, 'from knack.util import CLIError\n'), ((65056, 65157), 'knack.util.CLIError', 'CLIError', (['"""Incorrect usage \'--key-encryption-keyvault\': \'--key-encryption-key\' is required"""'], {}), '(\n "Incorrect usage \'--key-encryption-keyvault\': \'--key-encryption-key\' is required"\n )\n', (65064, 65157), False, 'from knack.util import CLIError\n'), ((3457, 3481), 'msrestazure.tools.is_valid_resource_id', 'is_valid_resource_id', (['kv'], {}), '(kv)\n', (3477, 3481), False, 'from msrestazure.tools import is_valid_resource_id\n'), ((24667, 24699), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', (['cmd.cli_ctx'], {}), '(cmd.cli_ctx)\n', (24686, 24699), False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n'), ((41329, 41374), 'knack.prompting.prompt_pass', 'prompt_pass', (['"""Admin Password: """'], {'confirm': '(True)'}), "('Admin Password: ', confirm=True)\n", (41340, 41374), False, 'from knack.prompting import prompt_pass, NoTTYException\n'), ((41424, 41484), 'knack.util.CLIError', 'CLIError', (['"""Please specify password in non-interactive mode."""'], {}), "('Please specify password in non-interactive mode.')\n", (41432, 41484), False, 'from knack.util import CLIError\n'), ((45328, 45393), 'azure.cli.core.keys.generate_ssh_keys', 'keys.generate_ssh_keys', (['private_key_filepath', 'public_key_filepath'], {}), '(private_key_filepath, public_key_filepath)\n', (45350, 45393), True, 'import azure.cli.core.keys as keys\n'), ((45761, 45910), 'knack.util.CLIError', 'CLIError', (['"""An RSA key file or key value must be supplied to SSH Key Value. You can use --generate-ssh-keys to let CLI generate one for you"""'], {}), "(\n 'An RSA key file or key value must be supplied to SSH Key Value. 
You can use --generate-ssh-keys to let CLI generate one for you'\n )\n", (45769, 45910), False, 'from knack.util import CLIError\n'), ((47232, 47332), 'knack.util.CLIError', 'CLIError', (['"""usage error: \'--scope\'/\'--role\' is only applicable when assign system identity"""'], {}), '(\n "usage error: \'--scope\'/\'--role\' is only applicable when assign system identity"\n )\n', (47240, 47332), False, 'from knack.util import CLIError\n'), ((51582, 51708), 'knack.util.CLIError', 'CLIError', (['"""usage error: \'--single-placement-group\' should be turned off for zonal scale-sets or with 100+ instances"""'], {}), '(\n "usage error: \'--single-placement-group\' should be turned off for zonal scale-sets or with 100+ instances"\n )\n', (51590, 51708), False, 'from knack.util import CLIError\n'), ((61580, 61601), 'knack.util.CLIError', 'CLIError', (['usage_error'], {}), '(usage_error)\n', (61588, 61601), False, 'from knack.util import CLIError\n'), ((61647, 61668), 'knack.util.CLIError', 'CLIError', (['usage_error'], {}), '(usage_error)\n', (61655, 61668), False, 'from knack.util import CLIError\n'), ((63628, 63738), 'knack.util.CLIError', 'CLIError', (['"""usage error: os type is required to create the image, please specify \'--os-type OS_TYPE\'"""'], {}), '(\n "usage error: os type is required to create the image, please specify \'--os-type OS_TYPE\'"\n )\n', (63636, 63738), False, 'from knack.util import CLIError\n'), ((33334, 33356), 're.match', 're.match', (['s', 'sku', 're.I'], {}), '(s, sku, re.I)\n', (33342, 33356), False, 'import re\n'), ((53346, 53394), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.application_gateway'], {}), '(namespace.application_gateway)\n', (53363, 53394), False, 'from msrestazure.tools import parse_resource_id\n'), ((55444, 55486), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.load_balancer'], {}), '(namespace.load_balancer)\n', (55461, 55486), False, 'from msrestazure.tools import parse_resource_id\n'), ((53197, 53245), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.application_gateway'], {}), '(namespace.application_gateway)\n', (53214, 53245), False, 'from msrestazure.tools import parse_resource_id\n'), ((55326, 55368), 'msrestazure.tools.parse_resource_id', 'parse_resource_id', (['namespace.load_balancer'], {}), '(namespace.load_balancer)\n', (55343, 55368), False, 'from msrestazure.tools import parse_resource_id\n'), ((39634, 39666), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', (['cmd.cli_ctx'], {}), '(cmd.cli_ctx)\n', (39653, 39666), False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n')] |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn as nn
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.blocks import SubpixelUpsample
from monai.networks.layers.factories import Conv
TEST_CASE_SUBPIXEL = []
for inch in range(1, 5):
for dim in range(1, 4):
for factor in range(1, 3):
test_case = [
{"dimensions": dim, "in_channels": inch, "scale_factor": factor},
(2, inch, *([8] * dim)),
(2, inch, *([8 * factor] * dim)),
]
TEST_CASE_SUBPIXEL.append(test_case)
TEST_CASE_SUBPIXEL_2D_EXTRA = [
{"dimensions": 2, "in_channels": 2, "scale_factor": 3},
(2, 2, 8, 4), # different size for H and W
(2, 2, 24, 12),
]
TEST_CASE_SUBPIXEL_3D_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
conv_block = nn.Sequential(
Conv[Conv.CONV, 3](1, 4, kernel_size=1), Conv[Conv.CONV, 3](4, 8, kernel_size=3, stride=1, padding=1)
)
TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2, "conv_block": conv_block},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA)
# add every test back with the pad/pool sequential component omitted
for tests in list(TEST_CASE_SUBPIXEL):
args: dict = tests[0] # type: ignore
args = dict(args)
args["apply_pad_pool"] = False
TEST_CASE_SUBPIXEL.append([args, tests[1], tests[2]])
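# A minimal standalone usage sketch (illustrative, mirroring the parameterized cases
# above): the module upsamples every spatial dimension by `scale_factor`, e.g.
#
#     net = SubpixelUpsample(dimensions=2, in_channels=2, scale_factor=2)
#     out = net(torch.randn(2, 2, 8, 8))  # expected output shape: (2, 2, 16, 16)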
class TestSUBPIXEL(unittest.TestCase):
@parameterized.expand(TEST_CASE_SUBPIXEL)
def test_subpixel_shape(self, input_param, input_shape, expected_shape):
net = SubpixelUpsample(**input_param)
with eval_mode(net):
result = net.forward(torch.randn(input_shape))
self.assertEqual(result.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| [
"parameterized.parameterized.expand",
"monai.networks.eval_mode",
"monai.networks.blocks.SubpixelUpsample",
"unittest.main",
"torch.randn"
] | [((2354, 2394), 'parameterized.parameterized.expand', 'parameterized.expand', (['TEST_CASE_SUBPIXEL'], {}), '(TEST_CASE_SUBPIXEL)\n', (2374, 2394), False, 'from parameterized import parameterized\n'), ((2698, 2713), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2711, 2713), False, 'import unittest\n'), ((2486, 2517), 'monai.networks.blocks.SubpixelUpsample', 'SubpixelUpsample', ([], {}), '(**input_param)\n', (2502, 2517), False, 'from monai.networks.blocks import SubpixelUpsample\n'), ((2531, 2545), 'monai.networks.eval_mode', 'eval_mode', (['net'], {}), '(net)\n', (2540, 2545), False, 'from monai.networks import eval_mode\n'), ((2580, 2604), 'torch.randn', 'torch.randn', (['input_shape'], {}), '(input_shape)\n', (2591, 2604), False, 'import torch\n')] |
from __future__ import absolute_import, division, print_function
import os
import json
from glue.core import Subset
DISPATCH = {}
def save_page(page, page_number, label, subset):
""" Convert a tab of a glue session into a D3PO page
:param page: Tuple of data viewers to save
:param label: Tab label
"""
result = {}
# layout settings
result['grid'] = {'nRows': 1, 'nColumns': len(page)}
result['name'] = str(label)
result['caption'] = 'Generated by Glue'
# style settings
d = page[0]._data[0]
unselected = dict(opacity=d.style.alpha,
size=d.style.markersize / 2,
color=d.style.color)
result['markerStyle'] = dict(unselected=unselected)
if subset is not None:
s = subset.style
selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color)
result['markerStyle']['selected'] = selected
result['selection'] = {'type': 'booleanColumn',
'columnName': 'selection_%i' % page_number}
result['histogramStyle'] = result['markerStyle']
# save each plot
result['plots'] = list(map(save_plot, page, range(len(page))))
return result
def save_plot_base(plot, index):
result = {}
result['gridPosition'] = [0, index]
return result
def save_plot(plot, index):
typ = type(plot)
return DISPATCH[typ](plot, index)
def save_scatter(plot, index):
""" Convert a single glue scatter plot to a D3PO plot
:param plot: Glue scatter plot
    :type plot: :class:`~glue.viewers.scatter.qt.ScatterViewer`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
result['type'] = 'scatter'
result['xAxis'] = dict(columnName=plot.state.x_att.label,
range=[float(plot.state.x_min), float(plot.state.x_max)])
result['yAxis'] = dict(columnName=plot.state.y_att.label,
range=[float(plot.state.y_min), float(plot.state.y_max)])
# XXX log scales
return result
def save_histogram(plot, index):
""" Convert a single histogram to a D3PO plot
:param plot: Glue histogram
:type plot: :class:`~glue.viewers.histogram.qt.HistogramViewer`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
result['type'] = 'histogram'
result['xAxis'] = dict(columnName=plot.state.x_att.label,
bins=int(plot.state.hist_n_bin),
range=[float(plot.state.hist_x_min), float(plot.state.hist_x_max)])
# XXX normed, cumultive, log
return result
def stage_subsets(application):
"""
Return a tuple of the subset to use for each stage/tab,
or None if the tab has no subset
If more than one subset is used per stage/tab, returns None
"""
result = []
for page in application.viewers:
subset = None
for viewer in page:
for layer_artist in viewer.layers:
if not layer_artist.visible:
continue
s = layer_artist.layer
if not isinstance(s, Subset):
continue
if subset is not None and s is not subset:
return None
if subset is None:
subset = s
result.append(subset)
return tuple(result)
def can_save_d3po(application):
"""
Check whether an application can be exported to D3PO.
Raises an exception if not
"""
dc = application.session.data_collection
if len(dc) != 1:
raise ValueError("D3PO Export only supports a single dataset")
for tab in application.viewers:
for viewer in tab:
if not isinstance(viewer, tuple(DISPATCH.keys())):
raise ValueError("D3PO Export only supports scatter "
"and histogram plots")
if sum(len(tab) for tab in application.viewers) == 0:
raise ValueError("D3PO Export requires at least one scatterplot "
"or histogram")
if stage_subsets(application) is None:
raise ValueError("D3PO Export restricted to 0 or 1 subsets visible "
"in each tab")
def make_data_file(data, subsets, path):
"""
Create the data.csv file, given Data and tuple of subsets
"""
from astropy.table import Table, Column
data_path = os.path.join(path, 'data.csv')
t = Table([data[c] for c in data.components],
names=[c.label for c in data.components])
for i, subset in enumerate(subsets):
if subset is None:
continue
c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i)
t.add_column(c)
t.write(data_path, format='ascii', delimiter=',')
def save_d3po(application, path, launch=True):
"""Save a Glue session to a D3PO bundle.
Currently, this has the following restrictions:
- The Glue session must have only one dataset open, and 0 or 1 subsets
- Only scatter plots or histograms are present
- At least one plot is present
    :param application: Glue application to save
:param path: Path to directory to save in. Will be created if needed
"""
if os.path.exists(path) and not os.path.isdir(path):
os.unlink(path)
if not os.path.exists(path):
os.mkdir(path)
data = application.session.data_collection[0]
subsets = stage_subsets(application)
viewers = application.viewers
# data.csv
make_data_file(data, subsets, path)
# states.json
result = {}
result['filename'] = 'data.csv' # XXX don't think this is needed?
result['title'] = "Glue export of %s" % data.label
result['states'] = list(map(save_page, application.viewers,
range(len(viewers)),
application.tab_names,
subsets))
state_path = os.path.join(path, 'states.json')
with open(state_path, 'w') as outfile:
json.dump(result, outfile, indent=2, sort_keys=True)
# index.html
html_path = os.path.join(path, 'index.html')
with open(html_path, 'w') as outfile:
outfile.write(HTML)
# show the result
if launch:
launch_d3po(path)
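# A minimal usage sketch (illustrative; assumes `app` is a Glue application with a
# single dataset and only scatter/histogram viewers, as required above):
#
#     can_save_d3po(app)                               # raises if the session can't be exported
#     save_d3po(app, '/tmp/d3po_bundle', launch=False) # writes data.csv, states.json, index.html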
def launch_d3po(path):
"""Start a server to view an exported D3PO bundle, and open a browser.
:param path: The TLD of the bundle
"""
from glue.external.six.moves.socketserver import TCPServer
from glue.external.six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from random import randrange
from socket import error
import webbrowser
from threading import Thread
os.chdir(path)
while True:
try:
PORT = randrange(8000, 9000)
server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False)
server.allow_reuse_address = True
server.server_bind()
break
except error: # port already taken
pass
print('Serving D3PO on port 0.0.0.0:%i' % PORT)
server.server_activate()
thread = Thread(target=server.serve_forever)
thread.setDaemon(True) # do not prevent shutdown
thread.start()
webbrowser.open('http://0.0.0.0:%i' % PORT)
def setup():
from glue.config import exporters
exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory')
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css">
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'>
<style>
#footer {
position: fixed;
bottom: 0;
right: 0;
}
</style>
<!-- not to be confused with Planet Telex -->
    <!-- Javascript dependencies -->
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://d3po.org/static/js/util.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script src="http://d3po.org/static/js/d3po.js"></script>
<script src="http://d3po.org/static/js/d3po.init.js"></script>
</head>
<body>
<div id="svg"><svg></svg></div>
<div id="controls">
<ul class="navigation">
</ul>
</div>
<div id="caption"></div>
<div id="footer">
More information: <a href="http://d3po.org">d3po.org</a>
</div>
<script type="text/javascript">
$(document).ready(function() {
initialize('states.json', 'data.csv');
}
);
</script>
</body>
</html>
"""
try:
from glue.viewers.scatter.qt import ScatterViewer
from glue.viewers.histogram.qt import HistogramViewer
except ImportError:
pass
else:
DISPATCH[ScatterViewer] = save_scatter
DISPATCH[HistogramViewer] = save_histogram
| [
"os.path.exists",
"astropy.table.Table",
"random.randrange",
"os.path.join",
"webbrowser.open",
"os.chdir",
"glue.config.exporters.add",
"os.path.isdir",
"glue.external.six.moves.socketserver.TCPServer",
"os.unlink",
"os.mkdir",
"threading.Thread",
"json.dump"
] | [((4561, 4591), 'os.path.join', 'os.path.join', (['path', '"""data.csv"""'], {}), "(path, 'data.csv')\n", (4573, 4591), False, 'import os\n'), ((4601, 4689), 'astropy.table.Table', 'Table', (['[data[c] for c in data.components]'], {'names': '[c.label for c in data.components]'}), '([data[c] for c in data.components], names=[c.label for c in data.\n components])\n', (4606, 4689), False, 'from astropy.table import Table, Column\n'), ((6098, 6131), 'os.path.join', 'os.path.join', (['path', '"""states.json"""'], {}), "(path, 'states.json')\n", (6110, 6131), False, 'import os\n'), ((6270, 6302), 'os.path.join', 'os.path.join', (['path', '"""index.html"""'], {}), "(path, 'index.html')\n", (6282, 6302), False, 'import os\n'), ((6852, 6866), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (6860, 6866), False, 'import os\n'), ((7268, 7303), 'threading.Thread', 'Thread', ([], {'target': 'server.serve_forever'}), '(target=server.serve_forever)\n', (7274, 7303), False, 'from threading import Thread\n'), ((7381, 7424), 'webbrowser.open', 'webbrowser.open', (["('http://0.0.0.0:%i' % PORT)"], {}), "('http://0.0.0.0:%i' % PORT)\n", (7396, 7424), False, 'import webbrowser\n'), ((7482, 7550), 'glue.config.exporters.add', 'exporters.add', (['"""D3PO"""', 'save_d3po', 'can_save_d3po'], {'outmode': '"""directory"""'}), "('D3PO', save_d3po, can_save_d3po, outmode='directory')\n", (7495, 7550), False, 'from glue.config import exporters\n'), ((5392, 5412), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5406, 5412), False, 'import os\n'), ((5450, 5465), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (5459, 5465), False, 'import os\n'), ((5478, 5498), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5492, 5498), False, 'import os\n'), ((5508, 5522), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (5516, 5522), False, 'import os\n'), ((6183, 6235), 'json.dump', 'json.dump', (['result', 'outfile'], {'indent': '(2)', 'sort_keys': '(True)'}), '(result, outfile, indent=2, sort_keys=True)\n', (6192, 6235), False, 'import json\n'), ((5421, 5440), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5434, 5440), False, 'import os\n'), ((6916, 6937), 'random.randrange', 'randrange', (['(8000)', '(9000)'], {}), '(8000, 9000)\n', (6925, 6937), False, 'from random import randrange\n'), ((6959, 7013), 'glue.external.six.moves.socketserver.TCPServer', 'TCPServer', (["('', PORT)", 'SimpleHTTPRequestHandler', '(False)'], {}), "(('', PORT), SimpleHTTPRequestHandler, False)\n", (6968, 7013), False, 'from glue.external.six.moves.socketserver import TCPServer\n')] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for testing the API service.
"""
import datetime
import json
ADMIN_TOKEN = '<PASSWORD>'
MEMBER_TOKEN = '<PASSWORD>'
class FakeMemcache(object):
"""Fake cache that is used for keystone tokens lookup."""
_cache = {
'tokens/%s' % ADMIN_TOKEN: {
'access': {
'token': {'id': ADMIN_TOKEN},
'user': {'id': 'user_id1',
'name': 'user_name1',
'tenantId': '123i2910',
'tenantName': 'mytenant',
'roles': [{'name': 'admin'}]
},
}
},
'tokens/%s' % MEMBER_TOKEN: {
'access': {
'token': {'id': MEMBER_TOKEN},
'user': {'id': 'user_id2',
'name': 'user-good',
'tenantId': 'project-good',
'tenantName': 'goodies',
'roles': [{'name': 'Member'}]
}
}
}
}
def __init__(self):
self.set_key = None
self.set_value = None
self.token_expiration = None
def get(self, key):
dt = datetime.datetime.now() + datetime.timedelta(minutes=5)
return json.dumps((self._cache.get(key), dt.strftime('%s')))
def set(self, key, value, timeout=None):
self.set_value = value
self.set_key = key
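# A minimal usage sketch (illustrative): the fake cache mimics the keystone token
# cache that auth middleware would consult.
#
#     cache = FakeMemcache()
#     raw = cache.get('tokens/%s' % ADMIN_TOKEN)  # JSON-encoded (token data, expiry) pair
#     cache.set('some-key', 'some-value')         # recorded on set_key / set_value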
| [
"datetime.datetime.now",
"datetime.timedelta"
] | [((1838, 1861), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1859, 1861), False, 'import datetime\n'), ((1864, 1893), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (1882, 1893), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Importing the libraries
import numpy as np
import pandas as pd # pandas for reading the files
from sklearn import preprocessing
from sklearn import svm # importing the model
from sklearn.feature_extraction.text import TfidfVectorizer # modeling the data to obtain numeric values from the text
from sklearn.metrics import classification_report, confusion_matrix
# Loading the data
train_labels = pd.read_csv('train_labels.txt', sep='\t', header=None, engine='python')
train_labels = train_labels.to_numpy() # convert the data frame into an array
train_labels = train_labels[:,1] # keep only the labels
train_samples = pd.read_csv('train_samples.txt', sep='\t', header=None, engine='python')
train_samples = train_samples.to_numpy()
train_samples = train_samples[:,1] # keep only the words
validation_samples = pd.read_csv('validation_samples.txt', sep='\t', header=None, engine='python')
validation_samples = validation_samples.to_numpy()
validation_samples = validation_samples[:,1] # save the words
validation_labels = pd.read_csv('validation_labels.txt', sep='\t', header=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1] # keep only the labels
test_samples = pd.read_csv('test_samples.txt', sep='\t', header=None, engine='python')
test_samples = test_samples.to_numpy()
label = test_samples[:,0] # save the labels
test_samples = test_samples[:,1] # save the words
def normalize_data(train_data, test_data, type='l2'): # function that returns the normalized data
    # the normalization type defaults to l2
scaler = None
if type == 'standard':
scaler = preprocessing.StandardScaler()
elif type == 'min_max':
scaler = preprocessing.MinMaxScaler()
elif type == 'l1' or type == 'l2':
scaler = preprocessing.Normalizer(norm = type)
if scaler is not None:
scaler.fit(train_data)
scaled_train_data = scaler.transform(train_data)
scaled_test_data = scaler.transform(test_data)
return scaled_train_data, scaled_test_data
else:
return train_data, test_data
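# Note (illustrative): the calls below rely on the default l2 normalization; an explicit
# type can also be passed, e.g. normalize_data(X_train, X_test, type='l1') for row-wise
# l1 scaling (the Normalizer-based 'l1'/'l2' options also accept sparse TF-IDF matrices).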
# Modeling the data
vectorizer = TfidfVectorizer()
training_features = vectorizer.fit_transform(train_samples)
validation_features = vectorizer.transform(validation_samples)
testing_features = vectorizer.transform(test_samples)
# Normalizing the data
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)
# Applying the SVM model
model_svm = svm.SVC(kernel='linear', C=23, gamma=110) # define the model
model_svm.fit(norm_train, train_labels) # the training process
test_predictions = model_svm.predict(norm_test) # prediction on the test data
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))
# Exporting the data in CSV format
test_export = {'id':label,'label':test_predictions}
data_f = pd.DataFrame(test_export)
data_f.to_csv('test_submission.csv',index=False) | [
"pandas.read_csv",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.DataFrame",
"sklearn.preprocessing.Normalizer",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.svm.SVC"
] | [((602, 673), 'pandas.read_csv', 'pd.read_csv', (['"""train_labels.txt"""'], {'sep': '"""\t"""', 'header': 'None', 'engine': '"""python"""'}), "('train_labels.txt', sep='\\t', header=None, engine='python')\n", (613, 673), True, 'import pandas as pd\n'), ((830, 902), 'pandas.read_csv', 'pd.read_csv', (['"""train_samples.txt"""'], {'sep': '"""\t"""', 'header': 'None', 'engine': '"""python"""'}), "('train_samples.txt', sep='\\t', header=None, engine='python')\n", (841, 902), True, 'import pandas as pd\n'), ((1026, 1103), 'pandas.read_csv', 'pd.read_csv', (['"""validation_samples.txt"""'], {'sep': '"""\t"""', 'header': 'None', 'engine': '"""python"""'}), "('validation_samples.txt', sep='\\t', header=None, engine='python')\n", (1037, 1103), True, 'import pandas as pd\n'), ((1240, 1316), 'pandas.read_csv', 'pd.read_csv', (['"""validation_labels.txt"""'], {'sep': '"""\t"""', 'header': 'None', 'engine': '"""python"""'}), "('validation_labels.txt', sep='\\t', header=None, engine='python')\n", (1251, 1316), True, 'import pandas as pd\n'), ((1451, 1522), 'pandas.read_csv', 'pd.read_csv', (['"""test_samples.txt"""'], {'sep': '"""\t"""', 'header': 'None', 'engine': '"""python"""'}), "('test_samples.txt', sep='\\t', header=None, engine='python')\n", (1462, 1522), True, 'import pandas as pd\n'), ((2378, 2395), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (2393, 2395), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2789, 2830), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'C': '(23)', 'gamma': '(110)'}), "(kernel='linear', C=23, gamma=110)\n", (2796, 2830), False, 'from sklearn import svm\n'), ((3313, 3338), 'pandas.DataFrame', 'pd.DataFrame', (['test_export'], {}), '(test_export)\n', (3325, 3338), True, 'import pandas as pd\n'), ((1872, 1902), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (1900, 1902), False, 'from sklearn import preprocessing\n'), ((1949, 1977), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (1975, 1977), False, 'from sklearn import preprocessing\n'), ((2035, 2070), 'sklearn.preprocessing.Normalizer', 'preprocessing.Normalizer', ([], {'norm': 'type'}), '(norm=type)\n', (2059, 2070), False, 'from sklearn import preprocessing\n')] |
from __future__ import annotations
import collections
import inspect
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from flytekit.common import constants as _common_constants
from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException
from flytekit.core.base_task import PythonTask
from flytekit.core.class_based_resolver import ClassStorageTaskResolver
from flytekit.core.condition import ConditionalSection
from flytekit.core.context_manager import (
BranchEvalMode,
CompilationState,
ExecutionState,
FlyteContext,
FlyteContextManager,
FlyteEntities,
)
from flytekit.core.interface import (
Interface,
transform_inputs_to_parameters,
transform_interface_to_typed_interface,
transform_signature_to_interface,
)
from flytekit.core.launch_plan import LaunchPlan
from flytekit.core.node import Node
from flytekit.core.promise import (
NodeOutput,
Promise,
VoidPromise,
binding_from_python_std,
create_and_link_node,
create_native_named_tuple,
create_task_output,
translate_inputs_to_literals,
)
from flytekit.core.python_auto_container import PythonAutoContainerTask
from flytekit.core.reference_entity import ReferenceEntity, WorkflowReference
from flytekit.core.type_engine import TypeEngine
from flytekit.loggers import logger
from flytekit.models import interface as _interface_models
from flytekit.models import literals as _literal_models
from flytekit.models.core import workflow as _workflow_model
GLOBAL_START_NODE = Node(
id=_common_constants.GLOBAL_INPUT_NODE_ID,
metadata=None,
bindings=[],
upstream_nodes=[],
flyte_entity=None,
)
class WorkflowFailurePolicy(Enum):
FAIL_IMMEDIATELY = _workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_IMMEDIATELY
FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = (
_workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
)
@dataclass
class WorkflowMetadata(object):
on_failure: WorkflowFailurePolicy
def __post_init__(self):
if (
self.on_failure != WorkflowFailurePolicy.FAIL_IMMEDIATELY
and self.on_failure != WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
):
raise FlyteValidationException(f"Failure policy {self.on_failure} not acceptable")
def to_flyte_model(self):
if self.on_failure == WorkflowFailurePolicy.FAIL_IMMEDIATELY:
on_failure = 0
else:
on_failure = 1
return _workflow_model.WorkflowMetadata(on_failure=on_failure)
@dataclass
class WorkflowMetadataDefaults(object):
"""
This class is similarly named to the one above. Please see the IDL for more information but essentially, this
WorkflowMetadataDefaults class represents the defaults that are handed down to a workflow's tasks, whereas
WorkflowMetadata represents metadata about the workflow itself.
"""
interruptible: bool
def __post_init__(self):
if self.interruptible is not True and self.interruptible is not False:
raise FlyteValidationException(f"Interruptible must be boolean, {self.interruptible} invalid")
def to_flyte_model(self):
return _workflow_model.WorkflowMetadataDefaults(interruptible=self.interruptible)
def construct_input_promises(inputs: List[str]):
return {
input_name: Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
for input_name in inputs
}
def get_promise(binding_data: _literal_models.BindingData, outputs_cache: Dict[Node, Dict[str, Promise]]) -> Promise:
"""
This is a helper function that will turn a binding into a Promise object, using a lookup map. Please see
get_promise_map for the rest of the details.
"""
if binding_data.promise is not None:
if not isinstance(binding_data.promise, NodeOutput):
raise FlyteValidationException(
f"Binding data Promises have to be of the NodeOutput type {type(binding_data.promise)} found"
)
# b.var is the name of the input to the task
# binding_data.promise.var is the name of the upstream node's output we want
return outputs_cache[binding_data.promise.node][binding_data.promise.var]
elif binding_data.scalar is not None:
return Promise(var="placeholder", val=_literal_models.Literal(scalar=binding_data.scalar))
elif binding_data.collection is not None:
literals = []
for bd in binding_data.collection.bindings:
p = get_promise(bd, outputs_cache)
literals.append(p.val)
return Promise(
var="placeholder",
val=_literal_models.Literal(collection=_literal_models.LiteralCollection(literals=literals)),
)
elif binding_data.map is not None:
literals = {}
for k, bd in binding_data.map.bindings.items():
p = get_promise(bd, outputs_cache)
literals[k] = p.val
return Promise(
var="placeholder", val=_literal_models.Literal(map=_literal_models.LiteralMap(literals=literals))
)
raise FlyteValidationException("Binding type unrecognized.")
def get_promise_map(
bindings: List[_literal_models.Binding], outputs_cache: Dict[Node, Dict[str, Promise]]
) -> Dict[str, Promise]:
"""
Local execution of imperatively defined workflows is done node by node. This function will fill in the node's
entity's input arguments, which are specified using the bindings list, and a map of nodes to its outputs.
Basically this takes the place of propeller in resolving bindings, pulling in outputs from previously completed
nodes and filling in the necessary inputs.
"""
entity_kwargs = {}
for b in bindings:
entity_kwargs[b.var] = get_promise(b.binding, outputs_cache)
return entity_kwargs
class WorkflowBase(object):
def __init__(
self,
name: str,
workflow_metadata: WorkflowMetadata,
workflow_metadata_defaults: WorkflowMetadataDefaults,
python_interface: Interface,
**kwargs,
):
self._name = name
self._workflow_metadata = workflow_metadata
self._workflow_metadata_defaults = workflow_metadata_defaults
self._python_interface = python_interface
self._interface = transform_interface_to_typed_interface(python_interface)
self._inputs = {}
self._unbound_inputs = set()
self._nodes = []
self._output_bindings: Optional[List[_literal_models.Binding]] = []
FlyteEntities.entities.append(self)
super().__init__(**kwargs)
@property
def name(self) -> str:
return self._name
@property
def short_name(self) -> str:
return self._name.split(".")[-1]
@property
def workflow_metadata(self) -> Optional[WorkflowMetadata]:
return self._workflow_metadata
@property
def workflow_metadata_defaults(self):
return self._workflow_metadata_defaults
@property
def python_interface(self) -> Interface:
return self._python_interface
@property
def interface(self) -> _interface_models.TypedInterface:
return self._interface
@property
def output_bindings(self) -> List[_literal_models.Binding]:
return self._output_bindings
@property
def nodes(self) -> List[Node]:
return self._nodes
def __repr__(self):
return (
f"WorkflowBase - {self._name} && "
f"Inputs ({len(self._python_interface.inputs)}): {self._python_interface.inputs} && "
f"Outputs ({len(self._python_interface.outputs)}): {self._python_interface.outputs} && "
f"Output bindings: {self._output_bindings} && "
)
def __call__(self, *args, **kwargs):
"""
The call pattern for Workflows is close to, but not exactly, the call pattern for Tasks. For local execution,
it goes
__call__ -> _local_execute -> execute
From execute, different things happen for the two Workflow styles. For PythonFunctionWorkflows, the Python
function is run, for the ImperativeWorkflow, each node is run one at a time.
"""
if len(args) > 0:
raise AssertionError("Only Keyword Arguments are supported for Workflow executions")
ctx = FlyteContextManager.current_context()
        # Get default arguments and override with kwargs passed in
input_kwargs = self.python_interface.default_inputs_as_kwargs
input_kwargs.update(kwargs)
# The first condition is compilation.
if ctx.compilation_state is not None:
return create_and_link_node(ctx, entity=self, interface=self.python_interface, **input_kwargs)
# This condition is hit when this workflow (self) is being called as part of a parent's workflow local run.
# The context specifying the local workflow execution has already been set.
elif (
ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION
):
if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED:
if self.python_interface and self.python_interface.output_tuple_name:
variables = [k for k in self.python_interface.outputs.keys()]
output_tuple = collections.namedtuple(self.python_interface.output_tuple_name, variables)
nones = [None for _ in self.python_interface.outputs.keys()]
return output_tuple(*nones)
else:
return None
# We are already in a local execution, just continue the execution context
return self._local_execute(ctx, **input_kwargs)
# Last is starting a local workflow execution
else:
# Run some sanity checks
# Even though the _local_execute call generally expects inputs to be Promises, we don't have to do the
# conversion here in this loop. The reason is because we don't prevent users from specifying inputs
# as direct scalars, which means there's another Promise-generating loop inside _local_execute too
for k, v in input_kwargs.items():
if k not in self.interface.inputs:
raise ValueError(f"Received unexpected keyword argument {k}")
if isinstance(v, Promise):
raise ValueError(f"Received a promise for a workflow call, when expecting a native value for {k}")
with FlyteContextManager.with_context(
ctx.with_execution_state(
ctx.new_execution_state().with_params(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION)
)
) as child_ctx:
result = self._local_execute(child_ctx, **input_kwargs)
expected_outputs = len(self.python_interface.outputs)
if expected_outputs == 0:
if result is None or isinstance(result, VoidPromise):
return None
else:
raise Exception(f"Workflow local execution expected 0 outputs but something received {result}")
if (1 < expected_outputs == len(result)) or (result is not None and expected_outputs == 1):
return create_native_named_tuple(ctx, result, self.python_interface)
raise ValueError("expected outputs and actual outputs do not match")
def execute(self, **kwargs):
raise Exception("Should not be called")
def _local_execute(self, ctx: FlyteContext, **kwargs) -> Union[Tuple[Promise], Promise, VoidPromise]:
# This is done to support the invariant that Workflow local executions always work with Promise objects
# holding Flyte literal values. Even in a wf, a user can call a sub-workflow with a Python native value.
for k, v in kwargs.items():
if not isinstance(v, Promise):
t = self.python_interface.inputs[k]
kwargs[k] = Promise(var=k, val=TypeEngine.to_literal(ctx, v, t, self.interface.inputs[k].type))
# The output of this will always be a combination of Python native values and Promises containing Flyte
# Literals.
function_outputs = self.execute(**kwargs)
# First handle the empty return case.
# A workflow function may return a task that doesn't return anything
# def wf():
# return t1()
# or it may not return at all
# def wf():
# t1()
# In the former case we get the task's VoidPromise, in the latter we get None
if isinstance(function_outputs, VoidPromise) or function_outputs is None:
if len(self.python_interface.outputs) != 0:
raise FlyteValueException(
function_outputs,
f"{function_outputs} received but interface has {len(self.python_interface.outputs)} outputs.",
)
return VoidPromise(self.name)
# Because we should've already returned in the above check, we just raise an error here.
if len(self.python_interface.outputs) == 0:
raise FlyteValueException(
function_outputs, f"{function_outputs} received but should've been VoidPromise or None."
)
expected_output_names = list(self.python_interface.outputs.keys())
if len(expected_output_names) == 1:
# Here we have to handle the fact that the wf could've been declared with a typing.NamedTuple of
# length one. That convention is used for naming outputs - and single-length-NamedTuples are
# particularly troublesome but elegant handling of them is not a high priority
# Again, we're using the output_tuple_name as a proxy.
if self.python_interface.output_tuple_name and isinstance(function_outputs, tuple):
wf_outputs_as_map = {expected_output_names[0]: function_outputs[0]}
else:
wf_outputs_as_map = {expected_output_names[0]: function_outputs}
else:
wf_outputs_as_map = {expected_output_names[i]: function_outputs[i] for i, _ in enumerate(function_outputs)}
# Basically we need to repackage the promises coming from the tasks into Promises that match the workflow's
# interface. We do that by extracting out the literals, and creating new Promises
wf_outputs_as_literal_dict = translate_inputs_to_literals(
ctx,
wf_outputs_as_map,
flyte_interface_types=self.interface.outputs,
native_types=self.python_interface.outputs,
)
# Recreate new promises that use the workflow's output names.
new_promises = [Promise(var, wf_outputs_as_literal_dict[var]) for var in expected_output_names]
return create_task_output(new_promises, self.python_interface)
class ImperativeWorkflow(WorkflowBase):
def __init__(
self,
name: str,
failure_policy: Optional[WorkflowFailurePolicy] = None,
interruptible: Optional[bool] = False,
):
metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
self._compilation_state = CompilationState(prefix="")
self._inputs = {}
# This unbound inputs construct is just here to help workflow authors detect issues a bit earlier. It just
# keeps track of workflow inputs that you've declared with add_workflow_input but haven't yet consumed. This
# is an error that Admin would return at compile time anyways, but this allows flytekit to raise
# the error earlier.
self._unbound_inputs = set()
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=workflow_metadata_defaults,
python_interface=Interface(),
)
@property
def compilation_state(self) -> CompilationState:
"""
Compilation is done a bit at a time, one task or other entity call at a time. This is why this workflow
class has to keep track of its own compilation state.
"""
return self._compilation_state
@property
def nodes(self) -> List[Node]:
return self._compilation_state.nodes
@property
def inputs(self) -> Dict[str, Promise]:
"""
This holds the input promises to the workflow. The nodes in these Promise objects should always point to
the global start node.
"""
return self._inputs
def __repr__(self):
return super().__repr__() + f"Nodes ({len(self.compilation_state.nodes)}): {self.compilation_state.nodes}"
def execute(self, **kwargs):
"""
Called by _local_execute. This function is how local execution for imperative workflows runs. Because when an
entity is added using the add_entity function, all inputs to that entity should've been already declared, we
can just iterate through the nodes in order and we shouldn't run into any dependency issues. That is, we force
the user to declare entities already in a topological sort. To keep track of outputs, we create a map to
start things off, filled in only with the workflow inputs (if any). As things are run, their outputs are stored
in this map.
After all nodes are run, we fill in workflow level outputs the same way as any other previous node.
"""
if not self.ready():
raise FlyteValidationException(f"Workflow not ready, wf is currently {self}")
# Create a map that holds the outputs of each node.
intermediate_node_outputs = {GLOBAL_START_NODE: {}} # type: Dict[Node, Dict[str, Promise]]
# Start things off with the outputs of the global input node, i.e. the inputs to the workflow.
# _local_execute should've already ensured that all the values in kwargs are Promise objects
for k, v in kwargs.items():
intermediate_node_outputs[GLOBAL_START_NODE][k] = v
# Next iterate through the nodes in order.
for node in self.compilation_state.nodes:
if node not in intermediate_node_outputs.keys():
intermediate_node_outputs[node] = {}
# Retrieve the entity from the node, and call it by looking up the promises the node's bindings require,
# and then fill them in using the node output tracker map we have.
entity = node.flyte_entity
entity_kwargs = get_promise_map(node.bindings, intermediate_node_outputs)
# Handle the calling and outputs of each node's entity
results = entity(**entity_kwargs)
expected_output_names = list(entity.python_interface.outputs.keys())
if isinstance(results, VoidPromise) or results is None:
continue # pragma: no cover # Move along, nothing to assign
# Because we should've already returned in the above check, we just raise an Exception here.
if len(entity.python_interface.outputs) == 0:
raise FlyteValueException(results, f"{results} received but should've been VoidPromise or None.")
# if there's only one output,
if len(expected_output_names) == 1:
if entity.python_interface.output_tuple_name and isinstance(results, tuple):
intermediate_node_outputs[node][expected_output_names[0]] = results[0]
else:
intermediate_node_outputs[node][expected_output_names[0]] = results
else:
if len(results) != len(expected_output_names):
raise FlyteValueException(results, f"Different lengths {results} {expected_output_names}")
for idx, r in enumerate(results):
intermediate_node_outputs[node][expected_output_names[idx]] = r
# The rest of this function looks like the above but now we're doing it for the workflow as a whole rather
# than just one node at a time.
if len(self.python_interface.outputs) == 0:
return VoidPromise(self.name)
# The values that we return below from the output have to be pulled by fulfilling all of the
# workflow's output bindings.
# The return style here has to match what 1) what the workflow would've returned had it been declared
# functionally, and 2) what a user would return in mock function. That is, if it's a tuple, then it
# should be a tuple here, if it's a one element named tuple, then we do a one-element non-named tuple,
# if it's a single element then we return a single element
if len(self.output_bindings) == 1:
# Again use presence of output_tuple_name to understand that we're dealing with a one-element
# named tuple
if self.python_interface.output_tuple_name:
return (get_promise(self.output_bindings[0].binding, intermediate_node_outputs),)
# Just a normal single element
return get_promise(self.output_bindings[0].binding, intermediate_node_outputs)
return tuple([get_promise(b.binding, intermediate_node_outputs) for b in self.output_bindings])
def add_entity(self, entity: Union[PythonTask, LaunchPlan, WorkflowBase], **kwargs) -> Node:
"""
Anytime you add an entity, all the inputs to the entity must be bound.
"""
# circular import
from flytekit.core.node_creation import create_node
ctx = FlyteContext.current_context()
if ctx.compilation_state is not None:
raise Exception("Can't already be compiling")
with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
n = create_node(entity=entity, **kwargs)
def get_input_values(input_value):
if isinstance(input_value, list):
input_promises = []
for x in input_value:
input_promises.extend(get_input_values(x))
return input_promises
if isinstance(input_value, dict):
input_promises = []
for _, v in input_value.items():
input_promises.extend(get_input_values(v))
return input_promises
else:
return [input_value]
# Every time an entity is added, mark it as used. The above function though will gather all the input
# values but we're only interested in the ones that are Promises so let's filter for those.
# There's probably a way to clean this up, maybe key off of the name instead of value?
all_input_values = get_input_values(kwargs)
for input_value in filter(lambda x: isinstance(x, Promise), all_input_values):
if input_value in self._unbound_inputs:
self._unbound_inputs.remove(input_value)
return n
def add_workflow_input(self, input_name: str, python_type: Type) -> Interface:
"""
Adds an input to the workflow.
"""
if input_name in self._inputs:
raise FlyteValidationException(f"Input {input_name} has already been specified for wf {self.name}.")
self._python_interface = self._python_interface.with_inputs(extra_inputs={input_name: python_type})
self._interface = transform_interface_to_typed_interface(self._python_interface)
self._inputs[input_name] = Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
self._unbound_inputs.add(self._inputs[input_name])
return self._inputs[input_name]
def add_workflow_output(
self, output_name: str, p: Union[Promise, List[Promise], Dict[str, Promise]], python_type: Optional[Type] = None
):
"""
Add an output with the given name from the given node output.
"""
if output_name in self._python_interface.outputs:
raise FlyteValidationException(f"Output {output_name} already exists in workflow {self.name}")
if python_type is None:
if type(p) == list or type(p) == dict:
raise FlyteValidationException(
f"If specifying a list or dict of Promises, you must specify the python_type type for {output_name}"
f" starting with the container type (e.g. List[int]"
)
python_type = p.ref.node.flyte_entity.python_interface.outputs[p.var]
logger.debug(f"Inferring python type for wf output {output_name} from Promise provided {python_type}")
flyte_type = TypeEngine.to_literal_type(python_type=python_type)
ctx = FlyteContext.current_context()
if ctx.compilation_state is not None:
raise Exception("Can't already be compiling")
with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
b = binding_from_python_std(
ctx, output_name, expected_literal_type=flyte_type, t_value=p, t_value_type=python_type
)
self._output_bindings.append(b)
self._python_interface = self._python_interface.with_outputs(extra_outputs={output_name: python_type})
self._interface = transform_interface_to_typed_interface(self._python_interface)
def add_task(self, task: PythonTask, **kwargs) -> Node:
return self.add_entity(task, **kwargs)
def add_launch_plan(self, launch_plan: LaunchPlan, **kwargs) -> Node:
return self.add_entity(launch_plan, **kwargs)
def add_subwf(self, sub_wf: WorkflowBase, **kwargs) -> Node:
return self.add_entity(sub_wf, **kwargs)
def ready(self) -> bool:
"""
This function returns whether or not the workflow is in a ready state, which means
* Has at least one node
* All workflow inputs are bound
These conditions assume that all nodes and workflow i/o changes were done with the functions above, which
do additional checking.
"""
if len(self.compilation_state.nodes) == 0:
return False
if len(self._unbound_inputs) > 0:
return False
return True
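# A minimal imperative-workflow sketch (assumes a task `t1(a: int) -> str` defined
# elsewhere; names and the `o0` output key are illustrative):
#
#     wf = ImperativeWorkflow(name="my.imperative.wf")
#     wf_in = wf.add_workflow_input("a", int)
#     node = wf.add_task(t1, a=wf_in)
#     wf.add_workflow_output("wf_out", node.outputs["o0"])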
class PythonFunctionWorkflow(WorkflowBase, ClassStorageTaskResolver):
"""
Please read :std:ref:`flyte:divedeep-workflows` first for a high-level understanding of what workflows are in Flyte.
This Python object represents a workflow defined by a function and decorated with the
:py:func:`@workflow <flytekit.workflow>` decorator. Please see notes on that object for additional information.
"""
def __init__(
self,
workflow_function: Callable,
metadata: Optional[WorkflowMetadata],
default_metadata: Optional[WorkflowMetadataDefaults],
):
name = f"{workflow_function.__module__}.{workflow_function.__name__}"
self._workflow_function = workflow_function
native_interface = transform_signature_to_interface(inspect.signature(workflow_function))
# TODO do we need this - can this not be in launchplan only?
# This can be in launch plan only, but is here only so that we don't have to re-evaluate. Or
# we can re-evaluate.
self._input_parameters = None
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=default_metadata,
python_interface=native_interface,
)
@property
def function(self):
return self._workflow_function
def task_name(self, t: PythonAutoContainerTask) -> str:
return f"{self.name}.{t.__module__}.{t.name}"
def compile(self, **kwargs):
"""
Supply static Python native values in the kwargs if you want them to be used in the compilation. This mimics
a 'closure' in the traditional sense of the word.
"""
ctx = FlyteContextManager.current_context()
self._input_parameters = transform_inputs_to_parameters(ctx, self.python_interface)
all_nodes = []
prefix = f"{ctx.compilation_state.prefix}-{self.short_name}-" if ctx.compilation_state is not None else ""
with FlyteContextManager.with_context(
ctx.with_compilation_state(CompilationState(prefix=prefix, task_resolver=self))
) as comp_ctx:
# Construct the default input promise bindings, but then override with the provided inputs, if any
input_kwargs = construct_input_promises([k for k in self.interface.inputs.keys()])
input_kwargs.update(kwargs)
workflow_outputs = self._workflow_function(**input_kwargs)
all_nodes.extend(comp_ctx.compilation_state.nodes)
# This little loop was added as part of the task resolver change. The task resolver interface itself is
# more or less stateless (the future-proofing get_all_tasks function notwithstanding). However the
# implementation of the TaskResolverMixin that this workflow class inherits from (ClassStorageTaskResolver)
# does store state. This loop adds Tasks that are defined within the body of the workflow to the workflow
# object itself.
for n in comp_ctx.compilation_state.nodes:
if isinstance(n.flyte_entity, PythonAutoContainerTask) and n.flyte_entity.task_resolver == self:
logger.debug(f"WF {self.name} saving task {n.flyte_entity.name}")
self.add(n.flyte_entity)
# Iterate through the workflow outputs
bindings = []
output_names = list(self.interface.outputs.keys())
# The reason the length 1 case is separate is because the one output might be a list. We don't want to
# iterate through the list here, instead we should let the binding creation unwrap it and make a binding
# collection/map out of it.
if len(output_names) == 1:
if isinstance(workflow_outputs, tuple):
if len(workflow_outputs) != 1:
raise AssertionError(
f"The Workflow specification indicates only one return value, received {len(workflow_outputs)}"
)
if self.python_interface.output_tuple_name is None:
raise AssertionError(
"Outputs specification for Workflow does not define a tuple, but return value is a tuple"
)
workflow_outputs = workflow_outputs[0]
t = self.python_interface.outputs[output_names[0]]
b = binding_from_python_std(
ctx,
output_names[0],
self.interface.outputs[output_names[0]].type,
workflow_outputs,
t,
)
bindings.append(b)
elif len(output_names) > 1:
if not isinstance(workflow_outputs, tuple):
raise AssertionError("The Workflow specification indicates multiple return values, received only one")
if len(output_names) != len(workflow_outputs):
raise Exception(f"Length mismatch {len(output_names)} vs {len(workflow_outputs)}")
for i, out in enumerate(output_names):
if isinstance(workflow_outputs[i], ConditionalSection):
raise AssertionError("A Conditional block (if-else) should always end with an `else_()` clause")
t = self.python_interface.outputs[out]
b = binding_from_python_std(
ctx,
out,
self.interface.outputs[out].type,
workflow_outputs[i],
t,
)
bindings.append(b)
# Save all the things necessary to create an SdkWorkflow, except for the missing project and domain
self._nodes = all_nodes
self._output_bindings = bindings
if not output_names:
return None
if len(output_names) == 1:
return bindings[0]
return tuple(bindings)
def execute(self, **kwargs):
"""
This function is here only to try to streamline the pattern between workflows and tasks. Since tasks
call execute from dispatch_execute which is in _local_execute, workflows should also call an execute inside
_local_execute. This makes mocking cleaner.
"""
return self._workflow_function(**kwargs)
def workflow(
_workflow_function=None,
failure_policy: Optional[WorkflowFailurePolicy] = None,
interruptible: Optional[bool] = False,
):
"""
This decorator declares a function to be a Flyte workflow. Workflows are declarative entities that construct a DAG
of tasks using the data flow between tasks.
Unlike a task, the function body of a workflow is evaluated at serialization-time (aka compile-time). This is because
while we can determine the entire structure of a task by looking at the function's signature,
workflows need to run through the function itself because the body of the function is what expresses the workflow structure.
It's also important to note that, local execution notwithstanding, it is not evaluated again when the workflow runs on Flyte.
That is, workflows should not call non-Flyte entities since they are only run once (again, this is with respect to
the platform, local runs notwithstanding).
Please see the :std:doc:`cookbook:sphx_glr_auto_core_flyte_basics_basic_workflow.py` for more usage examples.
:param _workflow_function: This argument is implicitly passed and represents the decorated function.
:param failure_policy: Use the options in flytekit.WorkflowFailurePolicy
:param interruptible: Whether or not tasks launched from this workflow are by default interruptible
"""
def wrapper(fn):
workflow_metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
workflow_instance = PythonFunctionWorkflow(
fn, metadata=workflow_metadata, default_metadata=workflow_metadata_defaults
)
workflow_instance.compile()
return workflow_instance
if _workflow_function:
return wrapper(_workflow_function)
else:
return wrapper
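# A minimal usage sketch of the decorator above (assumes a task `t1` defined elsewhere;
# names are illustrative):
#
#     @workflow(failure_policy=WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE)
#     def my_wf(a: int) -> int:
#         return t1(x=a)
#
# Calling `my_wf(a=3)` outside of a compilation context starts a local workflow execution.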
class ReferenceWorkflow(ReferenceEntity, PythonFunctionWorkflow):
"""
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
"""
def __init__(
self, project: str, domain: str, name: str, version: str, inputs: Dict[str, Type], outputs: Dict[str, Type]
):
super().__init__(WorkflowReference(project, domain, name, version), inputs, outputs)
def reference_workflow(
project: str,
domain: str,
name: str,
version: str,
) -> Callable[[Callable[..., Any]], ReferenceWorkflow]:
"""
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
"""
def wrapper(fn) -> ReferenceWorkflow:
interface = transform_signature_to_interface(inspect.signature(fn))
return ReferenceWorkflow(project, domain, name, version, interface.inputs, interface.outputs)
return wrapper
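# A minimal usage sketch (project/domain/name/version values are illustrative placeholders):
#
#     @reference_workflow(project="flytesnacks", domain="development",
#                         name="my.existing.wf", version="abc123")
#     def my_existing_wf(a: int) -> str:
#         ...
#
# The decorated function body is never executed; only its signature supplies the
# expected interface of the already-registered workflow.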
| [
"flytekit.core.promise.NodeOutput",
"flytekit.core.context_manager.CompilationState",
"flytekit.core.context_manager.FlyteContext.current_context",
"flytekit.core.context_manager.FlyteEntities.entities.append",
"inspect.signature",
"flytekit.core.promise.create_and_link_node",
"flytekit.core.type_engine.TypeEngine.to_literal_type",
"flytekit.core.reference_entity.WorkflowReference",
"flytekit.models.literals.Literal",
"flytekit.models.core.workflow.WorkflowMetadataDefaults",
"flytekit.core.promise.binding_from_python_std",
"flytekit.core.node.Node",
"flytekit.core.promise.VoidPromise",
"flytekit.core.context_manager.FlyteContextManager.current_context",
"flytekit.core.interface.transform_inputs_to_parameters",
"flytekit.core.type_engine.TypeEngine.to_literal",
"flytekit.common.exceptions.user.FlyteValueException",
"flytekit.common.exceptions.user.FlyteValidationException",
"flytekit.loggers.logger.debug",
"flytekit.core.promise.create_task_output",
"collections.namedtuple",
"flytekit.models.literals.LiteralCollection",
"flytekit.core.promise.Promise",
"flytekit.core.node_creation.create_node",
"flytekit.core.promise.create_native_named_tuple",
"flytekit.core.interface.transform_interface_to_typed_interface",
"flytekit.models.literals.LiteralMap",
"flytekit.core.promise.translate_inputs_to_literals",
"flytekit.core.interface.Interface",
"flytekit.models.core.workflow.WorkflowMetadata"
] | [((1613, 1730), 'flytekit.core.node.Node', 'Node', ([], {'id': '_common_constants.GLOBAL_INPUT_NODE_ID', 'metadata': 'None', 'bindings': '[]', 'upstream_nodes': '[]', 'flyte_entity': 'None'}), '(id=_common_constants.GLOBAL_INPUT_NODE_ID, metadata=None, bindings=[],\n upstream_nodes=[], flyte_entity=None)\n', (1617, 1730), False, 'from flytekit.core.node import Node\n'), ((5233, 5287), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['"""Binding type unrecognized."""'], {}), "('Binding type unrecognized.')\n", (5257, 5287), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((2601, 2656), 'flytekit.models.core.workflow.WorkflowMetadata', '_workflow_model.WorkflowMetadata', ([], {'on_failure': 'on_failure'}), '(on_failure=on_failure)\n', (2633, 2656), True, 'from flytekit.models.core import workflow as _workflow_model\n'), ((3306, 3380), 'flytekit.models.core.workflow.WorkflowMetadataDefaults', '_workflow_model.WorkflowMetadataDefaults', ([], {'interruptible': 'self.interruptible'}), '(interruptible=self.interruptible)\n', (3346, 3380), True, 'from flytekit.models.core import workflow as _workflow_model\n'), ((6445, 6501), 'flytekit.core.interface.transform_interface_to_typed_interface', 'transform_interface_to_typed_interface', (['python_interface'], {}), '(python_interface)\n', (6483, 6501), False, 'from flytekit.core.interface import Interface, transform_inputs_to_parameters, transform_interface_to_typed_interface, transform_signature_to_interface\n'), ((6674, 6709), 'flytekit.core.context_manager.FlyteEntities.entities.append', 'FlyteEntities.entities.append', (['self'], {}), '(self)\n', (6703, 6709), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((8466, 8503), 'flytekit.core.context_manager.FlyteContextManager.current_context', 'FlyteContextManager.current_context', ([], {}), '()\n', (8501, 8503), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((14677, 14824), 'flytekit.core.promise.translate_inputs_to_literals', 'translate_inputs_to_literals', (['ctx', 'wf_outputs_as_map'], {'flyte_interface_types': 'self.interface.outputs', 'native_types': 'self.python_interface.outputs'}), '(ctx, wf_outputs_as_map, flyte_interface_types=\n self.interface.outputs, native_types=self.python_interface.outputs)\n', (14705, 14824), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((15069, 15124), 'flytekit.core.promise.create_task_output', 'create_task_output', (['new_promises', 'self.python_interface'], {}), '(new_promises, self.python_interface)\n', (15087, 15124), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((15552, 15579), 'flytekit.core.context_manager.CompilationState', 'CompilationState', ([], {'prefix': '""""""'}), "(prefix='')\n", (15568, 15579), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((21895, 21925), 'flytekit.core.context_manager.FlyteContext.current_context', 
'FlyteContext.current_context', ([], {}), '()\n', (21923, 21925), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((23829, 23891), 'flytekit.core.interface.transform_interface_to_typed_interface', 'transform_interface_to_typed_interface', (['self._python_interface'], {}), '(self._python_interface)\n', (23867, 23891), False, 'from flytekit.core.interface import Interface, transform_inputs_to_parameters, transform_interface_to_typed_interface, transform_signature_to_interface\n'), ((25086, 25137), 'flytekit.core.type_engine.TypeEngine.to_literal_type', 'TypeEngine.to_literal_type', ([], {'python_type': 'python_type'}), '(python_type=python_type)\n', (25112, 25137), False, 'from flytekit.core.type_engine import TypeEngine\n'), ((25153, 25183), 'flytekit.core.context_manager.FlyteContext.current_context', 'FlyteContext.current_context', ([], {}), '()\n', (25181, 25183), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((28407, 28444), 'flytekit.core.context_manager.FlyteContextManager.current_context', 'FlyteContextManager.current_context', ([], {}), '()\n', (28442, 28444), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((28478, 28536), 'flytekit.core.interface.transform_inputs_to_parameters', 'transform_inputs_to_parameters', (['ctx', 'self.python_interface'], {}), '(ctx, self.python_interface)\n', (28508, 28536), False, 'from flytekit.core.interface import Interface, transform_inputs_to_parameters, transform_interface_to_typed_interface, transform_signature_to_interface\n'), ((2340, 2416), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['f"""Failure policy {self.on_failure} not acceptable"""'], {}), "(f'Failure policy {self.on_failure} not acceptable')\n", (2364, 2416), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((3171, 3264), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['f"""Interruptible must be boolean, {self.interruptible} invalid"""'], {}), "(\n f'Interruptible must be boolean, {self.interruptible} invalid')\n", (3195, 3264), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((8791, 8883), 'flytekit.core.promise.create_and_link_node', 'create_and_link_node', (['ctx'], {'entity': 'self', 'interface': 'self.python_interface'}), '(ctx, entity=self, interface=self.python_interface, **\n input_kwargs)\n', (8811, 8883), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((13197, 13219), 'flytekit.core.promise.VoidPromise', 'VoidPromise', (['self.name'], {}), '(self.name)\n', (13208, 13219), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((13388, 13501), 'flytekit.common.exceptions.user.FlyteValueException', 'FlyteValueException', (['function_outputs', 'f"""{function_outputs} received but should\'ve been VoidPromise or None."""'], {}), '(function_outputs,\n f"{function_outputs} 
received but should\'ve been VoidPromise or None.")\n', (13407, 13501), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((14973, 15018), 'flytekit.core.promise.Promise', 'Promise', (['var', 'wf_outputs_as_literal_dict[var]'], {}), '(var, wf_outputs_as_literal_dict[var])\n', (14980, 15018), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((17833, 17904), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['f"""Workflow not ready, wf is currently {self}"""'], {}), "(f'Workflow not ready, wf is currently {self}')\n", (17857, 17904), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((20467, 20489), 'flytekit.core.promise.VoidPromise', 'VoidPromise', (['self.name'], {}), '(self.name)\n', (20478, 20489), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((22152, 22188), 'flytekit.core.node_creation.create_node', 'create_node', ([], {'entity': 'entity'}), '(entity=entity, **kwargs)\n', (22163, 22188), False, 'from flytekit.core.node_creation import create_node\n'), ((23600, 23699), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['f"""Input {input_name} has already been specified for wf {self.name}."""'], {}), "(\n f'Input {input_name} has already been specified for wf {self.name}.')\n", (23624, 23699), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((24434, 24527), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['f"""Output {output_name} already exists in workflow {self.name}"""'], {}), "(\n f'Output {output_name} already exists in workflow {self.name}')\n", (24458, 24527), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((24961, 25073), 'flytekit.loggers.logger.debug', 'logger.debug', (['f"""Inferring python type for wf output {output_name} from Promise provided {python_type}"""'], {}), "(\n f'Inferring python type for wf output {output_name} from Promise provided {python_type}'\n )\n", (24973, 25073), False, 'from flytekit.loggers import logger\n'), ((25410, 25526), 'flytekit.core.promise.binding_from_python_std', 'binding_from_python_std', (['ctx', 'output_name'], {'expected_literal_type': 'flyte_type', 't_value': 'p', 't_value_type': 'python_type'}), '(ctx, output_name, expected_literal_type=flyte_type,\n t_value=p, t_value_type=python_type)\n', (25433, 25526), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((25742, 25804), 'flytekit.core.interface.transform_interface_to_typed_interface', 'transform_interface_to_typed_interface', (['self._python_interface'], {}), '(self._python_interface)\n', (25780, 25804), False, 'from flytekit.core.interface import Interface, transform_inputs_to_parameters, transform_interface_to_typed_interface, transform_signature_to_interface\n'), ((27481, 27517), 'inspect.signature', 'inspect.signature', (['workflow_function'], {}), '(workflow_function)\n', (27498, 27517), 
False, 'import inspect\n'), ((31099, 31216), 'flytekit.core.promise.binding_from_python_std', 'binding_from_python_std', (['ctx', 'output_names[0]', 'self.interface.outputs[output_names[0]].type', 'workflow_outputs', 't'], {}), '(ctx, output_names[0], self.interface.outputs[\n output_names[0]].type, workflow_outputs, t)\n', (31122, 31216), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((35492, 35541), 'flytekit.core.reference_entity.WorkflowReference', 'WorkflowReference', (['project', 'domain', 'name', 'version'], {}), '(project, domain, name, version)\n', (35509, 35541), False, 'from flytekit.core.reference_entity import ReferenceEntity, WorkflowReference\n'), ((36159, 36180), 'inspect.signature', 'inspect.signature', (['fn'], {}), '(fn)\n', (36176, 36180), False, 'import inspect\n'), ((3493, 3543), 'flytekit.core.promise.NodeOutput', 'NodeOutput', ([], {'node': 'GLOBAL_START_NODE', 'var': 'input_name'}), '(node=GLOBAL_START_NODE, var=input_name)\n', (3503, 3543), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((16194, 16205), 'flytekit.core.interface.Interface', 'Interface', ([], {}), '()\n', (16203, 16205), False, 'from flytekit.core.interface import Interface, transform_inputs_to_parameters, transform_interface_to_typed_interface, transform_signature_to_interface\n'), ((19436, 19531), 'flytekit.common.exceptions.user.FlyteValueException', 'FlyteValueException', (['results', 'f"""{results} received but should\'ve been VoidPromise or None."""'], {}), '(results,\n f"{results} received but should\'ve been VoidPromise or None.")\n', (19455, 19531), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((23955, 24005), 'flytekit.core.promise.NodeOutput', 'NodeOutput', ([], {'node': 'GLOBAL_START_NODE', 'var': 'input_name'}), '(node=GLOBAL_START_NODE, var=input_name)\n', (23965, 24005), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((24629, 24814), 'flytekit.common.exceptions.user.FlyteValidationException', 'FlyteValidationException', (['f"""If specifying a list or dict of Promises, you must specify the python_type type for {output_name} starting with the container type (e.g. List[int]"""'], {}), "(\n f'If specifying a list or dict of Promises, you must specify the python_type type for {output_name} starting with the container type (e.g. 
List[int]'\n )\n", (24653, 24814), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((4456, 4507), 'flytekit.models.literals.Literal', '_literal_models.Literal', ([], {'scalar': 'binding_data.scalar'}), '(scalar=binding_data.scalar)\n', (4479, 4507), True, 'from flytekit.models import literals as _literal_models\n'), ((11500, 11561), 'flytekit.core.promise.create_native_named_tuple', 'create_native_named_tuple', (['ctx', 'result', 'self.python_interface'], {}), '(ctx, result, self.python_interface)\n', (11525, 11561), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((20021, 20109), 'flytekit.common.exceptions.user.FlyteValueException', 'FlyteValueException', (['results', 'f"""Different lengths {results} {expected_output_names}"""'], {}), "(results,\n f'Different lengths {results} {expected_output_names}')\n", (20040, 20109), False, 'from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException\n'), ((28762, 28813), 'flytekit.core.context_manager.CompilationState', 'CompilationState', ([], {'prefix': 'prefix', 'task_resolver': 'self'}), '(prefix=prefix, task_resolver=self)\n', (28778, 28813), False, 'from flytekit.core.context_manager import BranchEvalMode, CompilationState, ExecutionState, FlyteContext, FlyteContextManager, FlyteEntities\n'), ((29901, 29966), 'flytekit.loggers.logger.debug', 'logger.debug', (['f"""WF {self.name} saving task {n.flyte_entity.name}"""'], {}), "(f'WF {self.name} saving task {n.flyte_entity.name}')\n", (29913, 29966), False, 'from flytekit.loggers import logger\n'), ((32022, 32117), 'flytekit.core.promise.binding_from_python_std', 'binding_from_python_std', (['ctx', 'out', 'self.interface.outputs[out].type', 'workflow_outputs[i]', 't'], {}), '(ctx, out, self.interface.outputs[out].type,\n workflow_outputs[i], t)\n', (32045, 32117), False, 'from flytekit.core.promise import NodeOutput, Promise, VoidPromise, binding_from_python_std, create_and_link_node, create_native_named_tuple, create_task_output, translate_inputs_to_literals\n'), ((9516, 9590), 'collections.namedtuple', 'collections.namedtuple', (['self.python_interface.output_tuple_name', 'variables'], {}), '(self.python_interface.output_tuple_name, variables)\n', (9538, 9590), False, 'import collections\n'), ((12236, 12299), 'flytekit.core.type_engine.TypeEngine.to_literal', 'TypeEngine.to_literal', (['ctx', 'v', 't', 'self.interface.inputs[k].type'], {}), '(ctx, v, t, self.interface.inputs[k].type)\n', (12257, 12299), False, 'from flytekit.core.type_engine import TypeEngine\n'), ((4817, 4869), 'flytekit.models.literals.LiteralCollection', '_literal_models.LiteralCollection', ([], {'literals': 'literals'}), '(literals=literals)\n', (4850, 4869), True, 'from flytekit.models import literals as _literal_models\n'), ((5165, 5210), 'flytekit.models.literals.LiteralMap', '_literal_models.LiteralMap', ([], {'literals': 'literals'}), '(literals=literals)\n', (5191, 5210), True, 'from flytekit.models import literals as _literal_models\n')] |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.autograd import Function, Variable
from torch.nn import Module
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
def _assert_no_grad(tensor):
assert not tensor.requires_grad, (
"gradients only computed for log_probs - please " "mark other tensors as not requiring gradients"
)
def forward_pass(log_probs, labels, blank):
"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""
T, U, _ = log_probs.shape
alphas = np.zeros((T, U), dtype='f')
for t in range(1, T):
alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank]
for u in range(1, U):
alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]]
for t in range(1, T):
for u in range(1, U):
no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank]
emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]]
alphas[t, u] = np.logaddexp(emit, no_emit)
loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]
return alphas, loglike
def backward_pass(log_probs, labels, blank):
"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""
T, U, _ = log_probs.shape
betas = np.zeros((T, U), dtype='f')
betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
for t in reversed(range(T - 1)):
betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank]
for u in reversed(range(U - 1)):
betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]]
for t in reversed(range(T - 1)):
for u in reversed(range(U - 1)):
no_emit = betas[t + 1, u] + log_probs[t, u, blank]
emit = betas[t, u + 1] + log_probs[t, u, labels[u]]
betas[t, u] = np.logaddexp(emit, no_emit)
return betas, betas[0, 0]
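# A hedged self-check sketch (not part of the original file): the forward and
# backward recursions must agree on the total log-likelihood, i.e.
# alphas[T-1, U-1] + log_probs[T-1, U-1, blank] == betas[0, 0].
# The toy sizes, seed and tolerance below are arbitrary assumptions.
def _check_forward_backward_consistency(T=4, U=3, V=5, blank=0):
    rng = np.random.RandomState(0)
    acts = rng.randn(T, U, V + 1)
    # log-softmax over the vocabulary axis so each (t, u) row is a proper log-distribution
    log_probs = acts - np.log(np.exp(acts).sum(axis=-1, keepdims=True))
    labels = rng.randint(1, V + 1, size=U - 1)  # U-1 non-blank target labels
    _, ll_forward = forward_pass(log_probs, labels, blank)
    _, ll_backward = backward_pass(log_probs, labels, blank)
    assert np.allclose(ll_forward, ll_backward, atol=1e-4)
    return ll_forward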
def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda):
"""
    Computes the gradients of the log_probs with respect to the log probability of this step occurring.
    Args:
        log_probs: Tensor of shape [T, U, V+1]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Tensor of shape [T, U] which represents the backward variable.
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
Gradients of shape [T, U, V+1] with respect to the forward log probability
"""
T, U, _ = log_probs.shape
grads = np.full(log_probs.shape, -float("inf"))
log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1]
grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :]
# // grad to label transition
for u, l in enumerate(labels):
grads[:, u, l] = alphas[:, u] + betas[:, u + 1]
grads = -np.exp(grads + log_probs - log_like)
if fastemit_lambda > 0.0:
for u, l in enumerate(labels):
grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l]
return grads
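# Added note (not in the original file): the routine above implements the standard
# RNN-T gradient of the negative log-likelihood with respect to the log-softmax
# outputs,
#     d(-log P(y|x)) / d log_probs[t, u, k]
#         = -exp(alphas[t, u] + betas[t', u'] + log_probs[t, u, k] - log P(y|x)),
# where (t', u') is (t+1, u) for the blank, (t, u+1) for the label k == labels[u],
# and the terminal blank at (T-1, U-1) uses a beta of 0; every other entry keeps its
# -inf initialisation and therefore a zero gradient. The FastEmit branch then scales
# the label-transition gradients by (1 + fastemit_lambda).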
def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda):
"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""
# General calculation of the fastemit regularization alignments
T, U, _ = log_probs.shape
# alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank])
return -reg
def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0):
"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
3D array: Gradients with respect to the
            unnormalized input activations
2d arrays: Alphas matrix (TxU)
2d array: Betas matrix (TxU)
"""
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda)
return -ll_forward, grads, alphas, betas
def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0):
"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""
grads = np.zeros_like(log_probs)
costs = []
for b in range(log_probs.shape[0]):
t = int(flen[b])
u = int(glen[b]) + 1
ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda)
grads[b, :t, :u, :] = g
reg = fastemit_regularization(
log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda
)
ll += reg
costs.append(ll)
return costs, grads
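# Added note (not in the original file): `costs` holds one negative log-likelihood
# per batch element (with the FastEmit term already folded in) and `grads` has the
# same [B, T, U, V+1] shape as `log_probs`; the torch wrapper below sums the costs
# into a single scalar loss.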
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda):
costs, grads = transduce_batch(
acts.detach().cpu().numpy(),
labels.cpu().numpy(),
act_lens.cpu().numpy(),
label_lens.cpu().numpy(),
blank,
fastemit_lambda,
)
costs = torch.FloatTensor([sum(costs)])
grads = torch.Tensor(grads).to(acts)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
return ctx.grads, None, None, None, None, None
class RNNTLoss(Module):
"""
Parameters:
        `blank` (int): default 0 - label index of blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""
def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0):
super(RNNTLoss, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.rnnt = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
assert len(labels.size()) == 2
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
certify_inputs(acts, labels, act_lens, label_lens)
acts = torch.nn.functional.log_softmax(acts, -1)
return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda)
if __name__ == '__main__':
loss = RNNTLoss(fastemit_lambda=0.01)
torch.manual_seed(0)
acts = torch.randn(1, 2, 5, 3)
labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32)
act_lens = torch.tensor([2], dtype=torch.int32)
label_lens = torch.tensor([len(labels[0])], dtype=torch.int32)
loss_val = loss(acts, labels, act_lens, label_lens)
| [
"torch.manual_seed",
"torch.max",
"torch.Tensor",
"numpy.logaddexp",
"numpy.exp",
"torch.tensor",
"numpy.zeros",
"torch.nn.functional.log_softmax",
"numpy.zeros_like",
"torch.randn"
] | [((2794, 2812), 'torch.max', 'torch.max', (['lengths'], {}), '(lengths)\n', (2803, 2812), False, 'import torch\n'), ((2825, 2849), 'torch.max', 'torch.max', (['label_lengths'], {}), '(label_lengths)\n', (2834, 2849), False, 'import torch\n'), ((3774, 3801), 'numpy.zeros', 'np.zeros', (['(T, U)'], {'dtype': '"""f"""'}), "((T, U), dtype='f')\n", (3782, 3801), True, 'import numpy as np\n'), ((4795, 4822), 'numpy.zeros', 'np.zeros', (['(T, U)'], {'dtype': '"""f"""'}), "((T, U), dtype='f')\n", (4803, 4822), True, 'import numpy as np\n'), ((9803, 9827), 'numpy.zeros_like', 'np.zeros_like', (['log_probs'], {}), '(log_probs)\n', (9816, 9827), True, 'import numpy as np\n'), ((11824, 11844), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (11841, 11844), False, 'import torch\n'), ((11857, 11880), 'torch.randn', 'torch.randn', (['(1)', '(2)', '(5)', '(3)'], {}), '(1, 2, 5, 3)\n', (11868, 11880), False, 'import torch\n'), ((11894, 11941), 'torch.tensor', 'torch.tensor', (['[[0, 2, 1, 2]]'], {'dtype': 'torch.int32'}), '([[0, 2, 1, 2]], dtype=torch.int32)\n', (11906, 11941), False, 'import torch\n'), ((11957, 11993), 'torch.tensor', 'torch.tensor', (['[2]'], {'dtype': 'torch.int32'}), '([2], dtype=torch.int32)\n', (11969, 11993), False, 'import torch\n'), ((6460, 6496), 'numpy.exp', 'np.exp', (['(grads + log_probs - log_like)'], {}), '(grads + log_probs - log_like)\n', (6466, 6496), True, 'import numpy as np\n'), ((11611, 11652), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['acts', '(-1)'], {}), '(acts, -1)\n', (11642, 11652), False, 'import torch\n'), ((4226, 4253), 'numpy.logaddexp', 'np.logaddexp', (['emit', 'no_emit'], {}), '(emit, no_emit)\n', (4238, 4253), True, 'import numpy as np\n'), ((5342, 5369), 'numpy.logaddexp', 'np.logaddexp', (['emit', 'no_emit'], {}), '(emit, no_emit)\n', (5354, 5369), True, 'import numpy as np\n'), ((10729, 10748), 'torch.Tensor', 'torch.Tensor', (['grads'], {}), '(grads)\n', (10741, 10748), False, 'import torch\n')] |
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user1_name, user2_name, tweet_text):
"""
Determine and return which user is more likely to say a given Tweet.
Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
Returns 1 corresponding to 1st user passed in, or 0 for second.
"""
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_vect = np.array([tweet.vect for tweet in user1.tweets])
user2_vect = np.array([tweet.vect for tweet in user2.tweets])
vects = np.vstack([user1_vect, user2_vect])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(vects, labels)
# We've done the model fitting, now to predict...
hypo_tweet_vect = vectorize_tweet(tweet_text)
return log_reg.predict(np.array(hypo_tweet_vect).reshape(1,-1))
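# Hypothetical usage sketch (not part of the original module): the user names and
# tweet text are placeholders, and an active Flask application context with both
# users already ingested into the database is assumed.
def _example_prediction(user1='austen', user2='elonmusk', text='Lambda School rocks!'):
    return predict_user(user1, user2, text)  # array([1.]) if user1 is more likely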
| [
"numpy.array",
"numpy.vstack",
"sklearn.linear_model.LogisticRegression"
] | [((561, 609), 'numpy.array', 'np.array', (['[tweet.vect for tweet in user1.tweets]'], {}), '([tweet.vect for tweet in user1.tweets])\n', (569, 609), True, 'import numpy as np\n'), ((627, 675), 'numpy.array', 'np.array', (['[tweet.vect for tweet in user2.tweets]'], {}), '([tweet.vect for tweet in user2.tweets])\n', (635, 675), True, 'import numpy as np\n'), ((689, 724), 'numpy.vstack', 'np.vstack', (['[user1_vect, user2_vect]'], {}), '([user1_vect, user2_vect])\n', (698, 724), True, 'import numpy as np\n'), ((856, 876), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (874, 876), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1027, 1052), 'numpy.array', 'np.array', (['hypo_tweet_vect'], {}), '(hypo_tweet_vect)\n', (1035, 1052), True, 'import numpy as np\n')] |
# sys
import os
import sys
import numpy as np
import random
import pickle
import json
# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms
# operation
from . import tools
class Feeder_UCF(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition in kinetics-skeleton dataset
Arguments:
data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
label_path: the path to label
random_choose: If true, randomly choose a portion of the input sequence
        random_shift: If true, randomly pad zeros at the beginning or end of sequence
        random_move: If true, perform a random but continuous transformation of the input sequence
        window_size: The length of the output sequence
        pose_matching: If true, match the pose between two frames
        num_person_in: The number of people the feeder can observe in the input sequence
        num_person_out: The number of people the feeder keeps in the output sequence
        debug: If true, only use the first 2 samples
"""
def __init__(self,
data_path,
label_path,
ignore_empty_sample=True,
random_choose=False,
random_shift=False,
random_move=False,
window_size=-1,
pose_matching=False,
num_person_in=5,
num_person_out=2,
debug=False):
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.num_person_in = num_person_in
self.num_person_out = num_person_out
self.pose_matching = pose_matching
self.ignore_empty_sample = ignore_empty_sample
self.load_data()
def load_data(self):
# load file list
self.sample_name = os.listdir(self.data_path)
if self.debug:
self.sample_name = self.sample_name[0:2]
# load label
label_path = self.label_path
with open(label_path) as f:
label_info = json.load(f)
sample_id = [name.split('.')[0] for name in self.sample_name]
self.label = np.array(
[label_info[id]['label_index'] for id in sample_id])
has_skeleton = np.array(
[label_info[id]['has_skeleton'] for id in sample_id])
        # ignore the samples which do not have a skeleton sequence
if self.ignore_empty_sample:
self.sample_name = [
s for h, s in zip(has_skeleton, self.sample_name) if h
]
self.label = self.label[has_skeleton]
# output data shape (N, C, T, V, M)
self.N = len(self.sample_name) #sample
self.C = 3 #channel
self.T = 90000 #frame
self.V = 18 #joint
self.M = self.num_person_out #person
def __len__(self):
return len(self.sample_name)
def __iter__(self):
return self
def __getitem__(self, index):
# output shape (C, T, V, M)
# get data
sample_name = self.sample_name[index]
sample_path = os.path.join(self.data_path, sample_name)
with open(sample_path, 'r') as f:
video_info = json.load(f)
# fill data_numpy
data_numpy = np.zeros((self.C, self.T, self.V, self.num_person_in))
count = 0
for frame_info in video_info['data']:
frame_index = frame_info['frame_index']
for m, skeleton_info in enumerate(frame_info["skeleton"]):
if m >= self.num_person_in:
break
pose = skeleton_info['pose']
score = skeleton_info['score']
frame_index = int(frame_index)
# print(frame_index)
data_numpy[0, frame_index, :, m] = pose[0::2]
data_numpy[1, frame_index, :, m] = pose[1::2]
data_numpy[2, frame_index, :, m] = score
# count += 1
# print(" ",count, " ")
# centralization
data_numpy[0:2] = data_numpy[0:2] - 0.5
data_numpy[0][data_numpy[2] == 0] = 0
data_numpy[1][data_numpy[2] == 0] = 0
# get & check label index
label = video_info['label_index']
assert (self.label[index] == label)
# data augmentation
if self.random_shift:
data_numpy = tools.random_shift(data_numpy)
if self.random_choose:
data_numpy = tools.random_choose(data_numpy, self.window_size)
elif self.window_size > 0:
data_numpy = tools.auto_pading(data_numpy, self.window_size)
if self.random_move:
data_numpy = tools.random_move(data_numpy)
# sort by score
sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1)
for t, s in enumerate(sort_index):
data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2,
0))
data_numpy = data_numpy[:, :, :, 0:self.num_person_out]
# match poses between 2 frames
if self.pose_matching:
data_numpy = tools.openpose_match(data_numpy)
return data_numpy, label
def top_k(self, score, top_k):
assert (all(self.label >= 0))
rank = score.argsort()
hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
return sum(hit_top_k) * 1.0 / len(hit_top_k)
def top_k_by_category(self, score, top_k):
assert (all(self.label >= 0))
return tools.top_k_by_category(self.label, score, top_k)
def calculate_recall_precision(self, score):
assert (all(self.label >= 0))
return tools.calculate_recall_precision(self.label, score)
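# Hypothetical usage sketch (paths, window size and batch size are assumptions,
# not part of the original file): the feeder plugs straight into a DataLoader.
def _example_loader(data_path='data/ucf/poses', label_path='data/ucf/labels.json'):
    dataset = Feeder_UCF(data_path, label_path, window_size=150, debug=True)
    return torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)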
| [
"os.listdir",
"os.path.join",
"numpy.array",
"numpy.zeros",
"json.load"
] | [((2055, 2081), 'os.listdir', 'os.listdir', (['self.data_path'], {}), '(self.data_path)\n', (2065, 2081), False, 'import os\n'), ((2393, 2454), 'numpy.array', 'np.array', (["[label_info[id]['label_index'] for id in sample_id]"], {}), "([label_info[id]['label_index'] for id in sample_id])\n", (2401, 2454), True, 'import numpy as np\n'), ((2491, 2553), 'numpy.array', 'np.array', (["[label_info[id]['has_skeleton'] for id in sample_id]"], {}), "([label_info[id]['has_skeleton'] for id in sample_id])\n", (2499, 2553), True, 'import numpy as np\n'), ((3330, 3371), 'os.path.join', 'os.path.join', (['self.data_path', 'sample_name'], {}), '(self.data_path, sample_name)\n', (3342, 3371), False, 'import os\n'), ((3500, 3554), 'numpy.zeros', 'np.zeros', (['(self.C, self.T, self.V, self.num_person_in)'], {}), '((self.C, self.T, self.V, self.num_person_in))\n', (3508, 3554), True, 'import numpy as np\n'), ((2279, 2291), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2288, 2291), False, 'import json\n'), ((3439, 3451), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3448, 3451), False, 'import json\n')] |
import os
import subprocess
import time
import grpc
import tests.rpc.proto.schema_registry_pb2 as pb2
import tests.rpc.proto.schema_registry_pb2_grpc as pb2_grpc
from tests.common.postgres import PostgresConfig
EXE = os.getenv('SCHEMA_REGISTRY_EXE') or 'schema-registry'
class SchemaRegistry:
def __init__(self,
edge_registry_addr,
kafka_brokers,
postgres_config: PostgresConfig,
kafka_group_id='schema_registry',
input_port='50101',
initial_schema=None):
self.edge_registry_addr = edge_registry_addr
self.kafka_brokers = kafka_brokers
self.kafka_group_id = kafka_group_id
self.input_port = input_port
self.postgres_config = postgres_config
self.initial_schema = initial_schema
self.svc = None
def start(self):
env = {
"SCHEMA_REGISTRY_COMMUNICATION_METHOD": 'kafka',
"SCHEMA_REGISTRY_KAFKA__BROKERS": self.kafka_brokers,
"SCHEMA_REGISTRY_KAFKA__GROUP_ID": self.kafka_group_id,
"SCHEMA_REGISTRY_INPUT_PORT": self.input_port,
"SCHEMA_REGISTRY_MONITORING__OTEL_SERVICE_NAME": 'schema-registry',
"SCHEMA_REGISTRY_MONITORING__STATUS_PORT": '0',
"SCHEMA_REGISTRY_SERVICES__EDGE_REGISTRY_URL": self.edge_registry_addr,
**self.postgres_config.to_dict("SCHEMA_REGISTRY")
}
if self.initial_schema is not None:
env.update(SCHEMA_REGISTRY_IMPORT_FILE=self.initial_schema)
self.svc = subprocess.Popen([EXE], env=env)
time.sleep(3)
return self
def stop(self):
self.svc.kill()
def create_schema(self, name, destination, query, body, schema_type):
with grpc.insecure_channel(f"localhost:{self.input_port}") as channel:
stub = pb2_grpc.SchemaRegistryStub(channel)
resp = stub.AddSchema(
pb2.NewSchema(
definition=bytes(body, 'utf-8'),
name=name,
insert_destination=destination,
query_address=query,
schema_type=pb2.SchemaType(schema_type=schema_type)))
return resp.id
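# Hypothetical usage sketch (addresses, schema fields and the schema_type value are
# placeholders, not taken from the original test suite):
def _example_roundtrip(postgres_config: PostgresConfig):
    registry = SchemaRegistry('localhost:50110', 'localhost:9092', postgres_config).start()
    try:
        return registry.create_schema('person', 'cdl.person.data',
                                      'http://localhost:50201', '{}', 0)
    finally:
        registry.stop()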
| [
"tests.rpc.proto.schema_registry_pb2.SchemaType",
"os.getenv",
"tests.rpc.proto.schema_registry_pb2_grpc.SchemaRegistryStub",
"subprocess.Popen",
"grpc.insecure_channel",
"time.sleep"
] | [((219, 251), 'os.getenv', 'os.getenv', (['"""SCHEMA_REGISTRY_EXE"""'], {}), "('SCHEMA_REGISTRY_EXE')\n", (228, 251), False, 'import os\n'), ((1585, 1617), 'subprocess.Popen', 'subprocess.Popen', (['[EXE]'], {'env': 'env'}), '([EXE], env=env)\n', (1601, 1617), False, 'import subprocess\n'), ((1626, 1639), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1636, 1639), False, 'import time\n'), ((1794, 1847), 'grpc.insecure_channel', 'grpc.insecure_channel', (['f"""localhost:{self.input_port}"""'], {}), "(f'localhost:{self.input_port}')\n", (1815, 1847), False, 'import grpc\n'), ((1879, 1915), 'tests.rpc.proto.schema_registry_pb2_grpc.SchemaRegistryStub', 'pb2_grpc.SchemaRegistryStub', (['channel'], {}), '(channel)\n', (1906, 1915), True, 'import tests.rpc.proto.schema_registry_pb2_grpc as pb2_grpc\n'), ((2191, 2230), 'tests.rpc.proto.schema_registry_pb2.SchemaType', 'pb2.SchemaType', ([], {'schema_type': 'schema_type'}), '(schema_type=schema_type)\n', (2205, 2230), True, 'import tests.rpc.proto.schema_registry_pb2 as pb2\n')] |
"""
When a routing policy is set with an empty condition, it should be loaded correctly and should route all
the requests to the correct backend.
"""
from urllib.parse import urlparse
import pytest
from packaging.version import Version # noqa # pylint: disable=unused-import
from testsuite import TESTED_VERSION, rawobj # noqa # pylint: disable=unused-import
from testsuite.echoed_request import EchoedRequest
pytestmark = [
pytest.mark.skipif("TESTED_VERSION < Version('2.11')"),
pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6415")]
@pytest.fixture(scope="module")
def service_proxy_settings(private_base_url):
"""
    Asserts that the echo API is used as the default backend
"""
return rawobj.Proxy(private_base_url("echo_api"))
@pytest.fixture(scope="module")
def service(service, private_base_url):
"""
Set the routing policy to route all requests to httpbin.
    (Using the logic that an empty condition should act as a catch-all rule)
"""
proxy = service.proxy.list()
proxy.policies.insert(0, rawobj.PolicyConfig(
"routing", {
"rules": [
{
"url": private_base_url("httpbin"),
"condition": {},
}]}))
return service
def test_routing_policy_without_header(api_client, private_base_url):
"""
    Sends a request and asserts that the routing policy is active and the
    request is routed to the correct backend (httpbin)
"""
parsed_url = urlparse(private_base_url("httpbin"))
response = api_client().get("/get")
assert response.status_code == 200
echoed_request = EchoedRequest.create(response)
assert echoed_request.headers["Host"] == parsed_url.hostname
| [
"pytest.fixture",
"testsuite.echoed_request.EchoedRequest.create",
"pytest.mark.issue",
"pytest.mark.skipif"
] | [((565, 595), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (579, 595), False, 'import pytest\n'), ((773, 803), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (787, 803), False, 'import pytest\n'), ((431, 485), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""TESTED_VERSION < Version(\'2.11\')"""'], {}), '("TESTED_VERSION < Version(\'2.11\')")\n', (449, 485), False, 'import pytest\n'), ((491, 560), 'pytest.mark.issue', 'pytest.mark.issue', (['"""https://issues.redhat.com/browse/THREESCALE-6415"""'], {}), "('https://issues.redhat.com/browse/THREESCALE-6415')\n", (508, 560), False, 'import pytest\n'), ((1647, 1677), 'testsuite.echoed_request.EchoedRequest.create', 'EchoedRequest.create', (['response'], {}), '(response)\n', (1667, 1677), False, 'from testsuite.echoed_request import EchoedRequest\n')] |
import contextlib
import ctypes
import json
import os
import shutil
import struct
import subprocess
import sys
import tempfile
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac
from .aes import (
aes_cbc_decrypt_bytes,
aes_gcm_decrypt_and_verify_bytes,
unpad_pkcs7,
)
from .compat import compat_b64decode, compat_cookiejar_Cookie
from .minicurses import MultilinePrinter, QuietMultilinePrinter
from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path
try:
import sqlite3
SQLITE_AVAILABLE = True
except ImportError:
# although sqlite3 is part of the standard library, it is possible to compile python without
# sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
SQLITE_AVAILABLE = False
try:
import secretstorage
SECRETSTORAGE_AVAILABLE = True
except ImportError:
SECRETSTORAGE_AVAILABLE = False
SECRETSTORAGE_UNAVAILABLE_REASON = (
'as the `secretstorage` module is not installed. '
'Please install by running `python3 -m pip install secretstorage`.')
except Exception as _err:
SECRETSTORAGE_AVAILABLE = False
SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}'
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
class YDLLogger:
def __init__(self, ydl=None):
self._ydl = ydl
def debug(self, message):
if self._ydl:
self._ydl.write_debug(message)
def info(self, message):
if self._ydl:
self._ydl.to_screen(f'[Cookies] {message}')
def warning(self, message, only_once=False):
if self._ydl:
self._ydl.report_warning(message, only_once)
def error(self, message):
if self._ydl:
self._ydl.report_error(message)
def progress_bar(self):
"""Return a context manager with a print method. (Optional)"""
# Do not print to files/pipes, loggers, or when --no-progress is used
if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'):
return
file = self._ydl._out_files['error']
try:
if not file.isatty():
return
except BaseException:
return
printer = MultilinePrinter(file, preserve_output=False)
printer.print = lambda message: printer.print_at_line(f'[Cookies] {message}', 0)
return printer
def _create_progress_bar(logger):
if hasattr(logger, 'progress_bar'):
printer = logger.progress_bar()
if printer:
return printer
printer = QuietMultilinePrinter()
printer.print = lambda _: None
return printer
def load_cookies(cookie_file, browser_specification, ydl):
cookie_jars = []
if browser_specification is not None:
browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
if cookie_file is not None:
cookie_file = expand_path(cookie_file)
jar = YoutubeDLCookieJar(cookie_file)
if os.access(cookie_file, os.R_OK):
jar.load(ignore_discard=True, ignore_expires=True)
cookie_jars.append(jar)
return _merge_cookie_jars(cookie_jars)
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
if browser_name == 'firefox':
return _extract_firefox_cookies(profile, logger)
elif browser_name == 'safari':
return _extract_safari_cookies(profile, logger)
elif browser_name in CHROMIUM_BASED_BROWSERS:
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
else:
raise ValueError(f'unknown browser: {browser_name}')
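# Hedged usage sketch (not part of the original module; the browser name and output
# path are placeholders): extract a browser's cookies and persist them in the
# Netscape format that the --cookies option understands.
def _dump_browser_cookies(browser_name='firefox', path='cookies.txt'):
    jar = extract_cookies_from_browser(browser_name)
    jar.save(path, ignore_discard=True, ignore_expires=True)
    return jar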
def _extract_firefox_cookies(profile, logger):
logger.info('Extracting cookies from firefox')
if not SQLITE_AVAILABLE:
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
if profile is None:
search_root = _firefox_browser_dir()
elif _is_path(profile):
search_root = profile
else:
search_root = os.path.join(_firefox_browser_dir(), profile)
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
jar = YoutubeDLCookieJar()
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
logger.info(f'Extracted {len(jar)} cookies from firefox')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _firefox_browser_dir():
if sys.platform in ('linux', 'linux2'):
return os.path.expanduser('~/.mozilla/firefox')
elif sys.platform == 'win32':
return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/Firefox')
else:
raise ValueError(f'unsupported platform: {sys.platform}')
def _get_chromium_based_browser_settings(browser_name):
# https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md
if sys.platform in ('linux', 'linux2'):
config = _config_home()
browser_dir = {
'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(config, 'google-chrome'),
'chromium': os.path.join(config, 'chromium'),
'edge': os.path.join(config, 'microsoft-edge'),
'opera': os.path.join(config, 'opera'),
'vivaldi': os.path.join(config, 'vivaldi'),
}[browser_name]
elif sys.platform == 'win32':
appdata_local = os.path.expandvars('%LOCALAPPDATA%')
appdata_roaming = os.path.expandvars('%APPDATA%')
browser_dir = {
'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'),
'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'),
'chromium': os.path.join(appdata_local, R'Chromium\User Data'),
'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
}[browser_name]
elif sys.platform == 'darwin':
appdata = os.path.expanduser('~/Library/Application Support')
browser_dir = {
'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(appdata, 'Google/Chrome'),
'chromium': os.path.join(appdata, 'Chromium'),
'edge': os.path.join(appdata, 'Microsoft Edge'),
'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
'vivaldi': os.path.join(appdata, 'Vivaldi'),
}[browser_name]
else:
raise ValueError(f'unsupported platform: {sys.platform}')
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
keyring_name = {
'brave': 'Brave',
'chrome': 'Chrome',
'chromium': 'Chromium',
'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
}[browser_name]
browsers_without_profiles = {'opera'}
return {
'browser_dir': browser_dir,
'keyring_name': keyring_name,
'supports_profiles': browser_name not in browsers_without_profiles
}
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
logger.info(f'Extracting cookies from {browser_name}')
if not SQLITE_AVAILABLE:
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
config = _get_chromium_based_browser_settings(browser_name)
if profile is None:
search_root = config['browser_dir']
elif _is_path(profile):
search_root = profile
config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile
else:
if config['supports_profiles']:
search_root = os.path.join(config['browser_dir'], profile)
else:
logger.error(f'{browser_name} does not support profiles')
search_root = config['browser_dir']
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.connection.text_factory = bytes
column_names = _get_column_names(cursor, 'cookies')
secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies')
jar = YoutubeDLCookieJar()
failed_cookies = 0
unencrypted_cookies = 0
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, line in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
is_encrypted, cookie = _process_chrome_cookie(decryptor, *line)
if not cookie:
failed_cookies += 1
continue
elif not is_encrypted:
unencrypted_cookies += 1
jar.set_cookie(cookie)
if failed_cookies > 0:
failed_message = f' ({failed_cookies} could not be decrypted)'
else:
failed_message = ''
logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
counts = decryptor.cookie_counts.copy()
counts['unencrypted'] = unencrypted_cookies
logger.debug(f'cookie version breakdown: {counts}')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure):
host_key = host_key.decode('utf-8')
name = name.decode('utf-8')
value = value.decode('utf-8')
path = path.decode('utf-8')
is_encrypted = not value and encrypted_value
if is_encrypted:
value = decryptor.decrypt(encrypted_value)
if value is None:
return is_encrypted, None
return is_encrypted, compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
comment=None, comment_url=None, rest={})
class ChromeCookieDecryptor:
"""
Overview:
Linux:
- cookies are either v10 or v11
- v10: AES-CBC encrypted with a fixed key
- v11: AES-CBC encrypted with an OS protected key (keyring)
- v11 keys can be stored in various places depending on the activate desktop environment [2]
Mac:
- cookies are either v10 or not v10
- v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux
- not v10: 'old data' stored as plaintext
Windows:
- cookies are either v10 or not v10
- v10: AES-GCM encrypted with a key which is encrypted with DPAPI
- not v10: encrypted with DPAPI
Sources:
- [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/
- [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc
- KeyStorageLinux::CreateService
"""
def decrypt(self, encrypted_value):
raise NotImplementedError('Must be implemented by sub classes')
@property
def cookie_counts(self):
raise NotImplementedError('Must be implemented by sub classes')
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
if sys.platform in ('linux', 'linux2'):
return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
elif sys.platform == 'darwin':
return MacChromeCookieDecryptor(browser_keyring_name, logger)
elif sys.platform == 'win32':
return WindowsChromeCookieDecryptor(browser_root, logger)
else:
raise NotImplementedError(f'Chrome cookie decryption is not supported on this platform: {sys.platform}')
class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger, *, keyring=None):
self._logger = logger
self._v10_key = self.derive_key(b'peanuts')
password = _get_linux_keyring_password(browser_keyring_name, keyring, logger)
self._v11_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
return pbkdf2_sha1(password, salt=b'<PASSWORD>', iterations=1, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
elif version == b'v11':
self._cookie_counts['v11'] += 1
if self._v11_key is None:
self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)
else:
self._cookie_counts['other'] += 1
return None
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger):
self._logger = logger
password = _get_mac_keyring_password(browser_keyring_name, logger)
self._v10_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return pbkdf2_sha1(password, salt=b'<PASSWORD>', iterations=1003, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
else:
self._cookie_counts['other'] += 1
# other prefixes are considered 'old data' which were stored as plaintext
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return encrypted_value
class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_root, logger):
self._logger = logger
self._v10_key = _get_windows_v10_key(browser_root, logger)
self._cookie_counts = {'v10': 0, 'other': 0}
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
# kNonceLength
nonce_length = 96 // 8
# boringssl
# EVP_AEAD_AES_GCM_TAG_LEN
authentication_tag_length = 16
raw_ciphertext = ciphertext
nonce = raw_ciphertext[:nonce_length]
ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
authentication_tag = raw_ciphertext[-authentication_tag_length:]
return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
else:
self._cookie_counts['other'] += 1
# any other prefix means the data is DPAPI encrypted
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8')
def _extract_safari_cookies(profile, logger):
if profile is not None:
logger.error('safari does not support profiles')
if sys.platform != 'darwin':
raise ValueError(f'unsupported platform: {sys.platform}')
cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
logger.debug('Trying secondary cookie location')
cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
raise FileNotFoundError('could not find safari cookies database')
with open(cookies_path, 'rb') as f:
cookies_data = f.read()
jar = parse_safari_cookies(cookies_data, logger=logger)
logger.info(f'Extracted {len(jar)} cookies from safari')
return jar
class ParserError(Exception):
pass
class DataParser:
def __init__(self, data, logger):
self._data = data
self.cursor = 0
self._logger = logger
def read_bytes(self, num_bytes):
if num_bytes < 0:
raise ParserError(f'invalid read of {num_bytes} bytes')
end = self.cursor + num_bytes
if end > len(self._data):
raise ParserError('reached end of input')
data = self._data[self.cursor:end]
self.cursor = end
return data
def expect_bytes(self, expected_value, message):
value = self.read_bytes(len(expected_value))
if value != expected_value:
raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
def read_uint(self, big_endian=False):
data_format = '>I' if big_endian else '<I'
return struct.unpack(data_format, self.read_bytes(4))[0]
def read_double(self, big_endian=False):
data_format = '>d' if big_endian else '<d'
return struct.unpack(data_format, self.read_bytes(8))[0]
def read_cstring(self):
buffer = []
while True:
c = self.read_bytes(1)
if c == b'\x00':
return b''.join(buffer).decode('utf-8')
else:
buffer.append(c)
def skip(self, num_bytes, description='unknown'):
if num_bytes > 0:
self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}')
elif num_bytes < 0:
raise ParserError(f'invalid skip of {num_bytes} bytes')
def skip_to(self, offset, description='unknown'):
self.skip(offset - self.cursor, description)
def skip_to_end(self, description='unknown'):
self.skip_to(len(self._data), description)
def _mac_absolute_time_to_posix(timestamp):
return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
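# Added note (not in the original file): Safari stores timestamps as seconds since
# 2001-01-01 00:00 UTC, so _mac_absolute_time_to_posix(0) evaluates to 978307200,
# the Unix timestamp of that epoch.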
def _parse_safari_cookies_header(data, logger):
p = DataParser(data, logger)
p.expect_bytes(b'cook', 'database signature')
number_of_pages = p.read_uint(big_endian=True)
page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)]
return page_sizes, p.cursor
def _parse_safari_cookies_page(data, jar, logger):
p = DataParser(data, logger)
p.expect_bytes(b'\x00\x00\x01\x00', 'page signature')
number_of_cookies = p.read_uint()
record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
if number_of_cookies == 0:
logger.debug(f'a cookies page of size {len(data)} has no cookies')
return
p.skip_to(record_offsets[0], 'unknown page header field')
with _create_progress_bar(logger) as progress_bar:
for i, record_offset in enumerate(record_offsets):
progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}')
p.skip_to(record_offset, 'space between records')
record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger)
p.read_bytes(record_length)
p.skip_to_end('space in between pages')
def _parse_safari_cookies_record(data, jar, logger):
p = DataParser(data, logger)
record_size = p.read_uint()
p.skip(4, 'unknown record field 1')
flags = p.read_uint()
is_secure = bool(flags & 0x0001)
p.skip(4, 'unknown record field 2')
domain_offset = p.read_uint()
name_offset = p.read_uint()
path_offset = p.read_uint()
value_offset = p.read_uint()
p.skip(8, 'unknown record field 3')
expiration_date = _mac_absolute_time_to_posix(p.read_double())
_creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841
try:
p.skip_to(domain_offset)
domain = p.read_cstring()
p.skip_to(name_offset)
name = p.read_cstring()
p.skip_to(path_offset)
path = p.read_cstring()
p.skip_to(value_offset)
value = p.read_cstring()
except UnicodeDecodeError:
logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
return record_size
p.skip_to(record_size, 'space at the end of the record')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
return record_size
def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
"""
References:
- https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
- this data appears to be out of date but the important parts of the database structure is the same
- there are a few bytes here and there which are skipped during parsing
"""
if jar is None:
jar = YoutubeDLCookieJar()
page_sizes, body_start = _parse_safari_cookies_header(data, logger)
p = DataParser(data[body_start:], logger)
for page_size in page_sizes:
_parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
p.skip_to_end('footer')
return jar
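# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows how the Safari helpers above compose: read a Cookies.binarycookies file
# from disk and list its cookies. The function name and the path argument are
# hypothetical; real callers locate the file via browser-specific helpers.
def _example_dump_safari_cookies(path, logger=YDLLogger()):
    with open(path, 'rb') as f:
        jar = parse_safari_cookies(f.read(), logger=logger)
    # each entry in the jar is a standard cookiejar Cookie
    return [(cookie.domain, cookie.name, cookie.value) for cookie in jar]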
class _LinuxDesktopEnvironment(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h
DesktopEnvironment
"""
OTHER = auto()
CINNAMON = auto()
GNOME = auto()
KDE = auto()
PANTHEON = auto()
UNITY = auto()
XFCE = auto()
class _LinuxKeyring(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h
SelectedLinuxBackend
"""
KWALLET = auto()
GNOMEKEYRING = auto()
BASICTEXT = auto()
SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys()
def _get_linux_desktop_environment(env):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
GetDesktopEnvironment
"""
xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
desktop_session = env.get('DESKTOP_SESSION', None)
if xdg_current_desktop is not None:
xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()
if xdg_current_desktop == 'Unity':
if desktop_session is not None and 'gnome-fallback' in desktop_session:
return _LinuxDesktopEnvironment.GNOME
else:
return _LinuxDesktopEnvironment.UNITY
elif xdg_current_desktop == 'GNOME':
return _LinuxDesktopEnvironment.GNOME
elif xdg_current_desktop == 'X-Cinnamon':
return _LinuxDesktopEnvironment.CINNAMON
elif xdg_current_desktop == 'KDE':
return _LinuxDesktopEnvironment.KDE
elif xdg_current_desktop == 'Pantheon':
return _LinuxDesktopEnvironment.PANTHEON
elif xdg_current_desktop == 'XFCE':
return _LinuxDesktopEnvironment.XFCE
elif desktop_session is not None:
if desktop_session in ('mate', 'gnome'):
return _LinuxDesktopEnvironment.GNOME
elif 'kde' in desktop_session:
return _LinuxDesktopEnvironment.KDE
elif 'xfce' in desktop_session:
return _LinuxDesktopEnvironment.XFCE
else:
if 'GNOME_DESKTOP_SESSION_ID' in env:
return _LinuxDesktopEnvironment.GNOME
elif 'KDE_FULL_SESSION' in env:
return _LinuxDesktopEnvironment.KDE
return _LinuxDesktopEnvironment.OTHER
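# Illustrative check (editor's note): the helper only inspects the mapping it is
# given, so it can be exercised with a plain dict instead of os.environ, e.g.
#   _get_linux_desktop_environment({'XDG_CURRENT_DESKTOP': 'KDE'}) is _LinuxDesktopEnvironment.KDE
#   _get_linux_desktop_environment({}) is _LinuxDesktopEnvironment.OTHER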
def _choose_linux_keyring(logger):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc
SelectBackend
"""
desktop_environment = _get_linux_desktop_environment(os.environ)
logger.debug(f'detected desktop environment: {desktop_environment.name}')
if desktop_environment == _LinuxDesktopEnvironment.KDE:
linux_keyring = _LinuxKeyring.KWALLET
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
linux_keyring = _LinuxKeyring.BASICTEXT
else:
linux_keyring = _LinuxKeyring.GNOMEKEYRING
return linux_keyring
def _get_kwallet_network_wallet(logger):
""" The name of the wallet used to store network passwords.
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc
KWalletDBus::NetworkWallet
which does a dbus call to the following function:
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
Wallet::NetworkWallet
"""
default_wallet = 'kdewallet'
try:
proc = Popen([
'dbus-send', '--session', '--print-reply=literal',
'--dest=org.kde.kwalletd5',
'/modules/kwalletd5',
'org.kde.KWallet.networkWallet'
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.warning('failed to read NetworkWallet')
return default_wallet
else:
network_wallet = stdout.decode('utf-8').strip()
logger.debug(f'NetworkWallet = "{network_wallet}"')
return network_wallet
except Exception as e:
logger.warning(f'exception while obtaining NetworkWallet: {e}')
return default_wallet
def _get_kwallet_password(browser_keyring_name, logger):
logger.debug('using kwallet-query to obtain password from kwallet')
if shutil.which('kwallet-query') is None:
logger.error('kwallet-query command not found. KWallet and kwallet-query '
                     'must be installed to read from KWallet. kwallet-query should be '
'included in the kwallet package for your distribution')
return b''
network_wallet = _get_kwallet_network_wallet(logger)
try:
proc = Popen([
'kwallet-query',
'--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys',
network_wallet
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.error(f'kwallet-query failed with return code {proc.returncode}. Please consult '
'the kwallet-query man page for details')
return b''
else:
if stdout.lower().startswith(b'failed to read'):
logger.debug('failed to read password from kwallet. Using empty string instead')
# this sometimes occurs in KDE because chrome does not check hasEntry and instead
            # just tries to read the value (for which kwallet returns "") whereas kwallet-query
# checks hasEntry. To verify this:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
# while starting chrome.
# this may be a bug as the intended behaviour is to generate a random password and store
# it, but that doesn't matter here.
return b''
else:
logger.debug('password found')
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running kwallet-query: {error_to_str(e)}')
return b''
def _get_gnome_keyring_password(browser_keyring_name, logger):
if not SECRETSTORAGE_AVAILABLE:
logger.error(f'secretstorage not available {SECRETSTORAGE_UNAVAILABLE_REASON}')
return b''
    # The GNOME keyring does not seem to organise keys in the same way as KWallet.
    # Watching `dbus-monitor` during startup shows that Chromium lists all keys
    # and presumably searches for its key in the list, so it appears we must do the same.
# https://github.com/jaraco/keyring/issues/556
with contextlib.closing(secretstorage.dbus_init()) as con:
col = secretstorage.get_default_collection(con)
for item in col.get_all_items():
if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret()
else:
logger.error('failed to read from keyring')
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
# note: chrome/chromium can be run with the following flags to determine which keyring backend
# it has chosen to use
# chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_
# Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection
# will not be sufficient in all cases.
keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger)
logger.debug(f'Chosen keyring: {keyring.name}')
if keyring == _LinuxKeyring.KWALLET:
return _get_kwallet_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.GNOMEKEYRING:
return _get_gnome_keyring_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.BASICTEXT:
# when basic text is chosen, all cookies are stored as v10 (so no keyring password is required)
return None
assert False, f'Unknown keyring {keyring}'
def _get_mac_keyring_password(browser_keyring_name, logger):
logger.debug('using find-generic-password to obtain password from OSX keychain')
try:
proc = Popen(
['security', 'find-generic-password',
'-w', # write password to stdout
'-a', browser_keyring_name, # match 'account'
'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
return None
def _get_windows_v10_key(browser_root, logger):
path = _find_most_recently_used_file(browser_root, 'Local State', logger)
if path is None:
logger.error('could not find local state file')
return None
logger.debug(f'Found local state file at "{path}"')
with open(path, encoding='utf8') as f:
data = json.load(f)
try:
base64_key = data['os_crypt']['encrypted_key']
except KeyError:
logger.error('no encrypted key in Local State')
return None
encrypted_key = compat_b64decode(base64_key)
prefix = b'DPAPI'
if not encrypted_key.startswith(prefix):
logger.error('invalid key')
return None
return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)
def pbkdf2_sha1(password, salt, iterations, key_length):
return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
try:
return plaintext.decode('utf-8')
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
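# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal example of how pbkdf2_sha1() and _decrypt_aes_cbc() are typically
# combined for Chromium's Linux 'v10' cookie values: derive a key from the static
# password b'peanuts' with salt b'saltysalt' and a single iteration, then AES-CBC
# decrypt the payload that follows the b'v10' prefix. The constants are assumptions
# for illustration; the decryptor classes elsewhere in this file are authoritative.
def _example_decrypt_linux_v10(encrypted_value, logger):
    key = pbkdf2_sha1(b'peanuts', salt=b'saltysalt', iterations=1, key_length=16)
    return _decrypt_aes_cbc(encrypted_value[len(b'v10'):], key, logger)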
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
try:
plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
except ValueError:
logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
return None
try:
return plaintext.decode('utf-8')
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_windows_dpapi(ciphertext, logger):
"""
References:
- https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
"""
from ctypes.wintypes import DWORD
class DATA_BLOB(ctypes.Structure):
_fields_ = [('cbData', DWORD),
('pbData', ctypes.POINTER(ctypes.c_char))]
buffer = ctypes.create_string_buffer(ciphertext)
blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
blob_out = DATA_BLOB()
ret = ctypes.windll.crypt32.CryptUnprotectData(
ctypes.byref(blob_in), # pDataIn
None, # ppszDataDescr: human readable description of pDataIn
None, # pOptionalEntropy: salt?
None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display
0, # dwFlags
ctypes.byref(blob_out) # pDataOut
)
if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True)
return None
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
ctypes.windll.kernel32.LocalFree(blob_out.pbData)
return result
def _config_home():
return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
def _open_database_copy(database_path, tmpdir):
# cannot open sqlite databases if they are already in use (e.g. by the browser)
database_copy_path = os.path.join(tmpdir, 'temporary.sqlite')
shutil.copy(database_path, database_copy_path)
conn = sqlite3.connect(database_copy_path)
return conn.cursor()
def _get_column_names(cursor, table_name):
table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
return [row[1].decode('utf-8') for row in table_info]
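# Illustrative usage (editor's note): the two helpers above are typically chained
# to inspect a browser database from a temporary copy; the paths are hypothetical.
#   cursor = _open_database_copy('/path/to/Cookies', tmpdir)
#   column_names = _get_column_names(cursor, 'cookies')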
def _find_most_recently_used_file(root, filename, logger):
# if there are multiple browser profiles, take the most recently used one
i, paths = 0, []
with _create_progress_bar(logger) as progress_bar:
for curr_root, dirs, files in os.walk(root):
for file in files:
i += 1
progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
if file == filename:
paths.append(os.path.join(curr_root, file))
return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
def _merge_cookie_jars(jars):
output_jar = YoutubeDLCookieJar()
for jar in jars:
for cookie in jar:
output_jar.set_cookie(cookie)
if jar.filename is not None:
output_jar.filename = jar.filename
return output_jar
def _is_path(value):
return os.path.sep in value
def _parse_browser_specification(browser_name, profile=None, keyring=None):
if browser_name not in SUPPORTED_BROWSERS:
raise ValueError(f'unsupported browser: "{browser_name}"')
if keyring not in (None, *SUPPORTED_KEYRINGS):
raise ValueError(f'unsupported keyring: "{keyring}"')
if profile is not None and _is_path(profile):
profile = os.path.expanduser(profile)
return browser_name, profile, keyring
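# Illustrative usage (editor's note), assuming 'chrome' appears in the
# SUPPORTED_BROWSERS constant defined elsewhere in this module:
#   _parse_browser_specification('chrome', '~/some/profile')
#   -> ('chrome', '/home/<user>/some/profile', None)   # profile path expanded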
| [
"enum.auto",
"ctypes.string_at",
"ctypes.create_string_buffer",
"secretstorage.get_default_collection",
"datetime.timedelta",
"os.walk",
"datetime.datetime",
"os.path.expanduser",
"os.access",
"shutil.which",
"os.path.isfile",
"os.path.dirname",
"secretstorage.dbus_init",
"shutil.copy",
"os.lstat",
"ctypes.sizeof",
"tempfile.TemporaryDirectory",
"ctypes.byref",
"ctypes.POINTER",
"sqlite3.connect",
"os.path.expandvars",
"os.path.join",
"hashlib.pbkdf2_hmac",
"ctypes.windll.kernel32.LocalFree",
"json.load"
] | [((19724, 19785), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Library/Cookies/Cookies.binarycookies"""'], {}), "('~/Library/Cookies/Cookies.binarycookies')\n", (19742, 19785), False, 'import os\n'), ((25866, 25872), 'enum.auto', 'auto', ([], {}), '()\n', (25870, 25872), False, 'from enum import Enum, auto\n'), ((25888, 25894), 'enum.auto', 'auto', ([], {}), '()\n', (25892, 25894), False, 'from enum import Enum, auto\n'), ((25907, 25913), 'enum.auto', 'auto', ([], {}), '()\n', (25911, 25913), False, 'from enum import Enum, auto\n'), ((25924, 25930), 'enum.auto', 'auto', ([], {}), '()\n', (25928, 25930), False, 'from enum import Enum, auto\n'), ((25946, 25952), 'enum.auto', 'auto', ([], {}), '()\n', (25950, 25952), False, 'from enum import Enum, auto\n'), ((25965, 25971), 'enum.auto', 'auto', ([], {}), '()\n', (25969, 25971), False, 'from enum import Enum, auto\n'), ((25983, 25989), 'enum.auto', 'auto', ([], {}), '()\n', (25987, 25989), False, 'from enum import Enum, auto\n'), ((26188, 26194), 'enum.auto', 'auto', ([], {}), '()\n', (26192, 26194), False, 'from enum import Enum, auto\n'), ((26214, 26220), 'enum.auto', 'auto', ([], {}), '()\n', (26218, 26220), False, 'from enum import Enum, auto\n'), ((26237, 26243), 'enum.auto', 'auto', ([], {}), '()\n', (26241, 26243), False, 'from enum import Enum, auto\n'), ((35372, 35431), 'hashlib.pbkdf2_hmac', 'pbkdf2_hmac', (['"""sha1"""', 'password', 'salt', 'iterations', 'key_length'], {}), "('sha1', password, salt, iterations, key_length)\n", (35383, 35431), False, 'from hashlib import pbkdf2_hmac\n'), ((36808, 36847), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['ciphertext'], {}), '(ciphertext)\n', (36835, 36847), False, 'import ctypes\n'), ((37438, 37488), 'ctypes.string_at', 'ctypes.string_at', (['blob_out.pbData', 'blob_out.cbData'], {}), '(blob_out.pbData, blob_out.cbData)\n', (37454, 37488), False, 'import ctypes\n'), ((37493, 37542), 'ctypes.windll.kernel32.LocalFree', 'ctypes.windll.kernel32.LocalFree', (['blob_out.pbData'], {}), '(blob_out.pbData)\n', (37525, 37542), False, 'import ctypes\n'), ((37820, 37860), 'os.path.join', 'os.path.join', (['tmpdir', '"""temporary.sqlite"""'], {}), "(tmpdir, 'temporary.sqlite')\n", (37832, 37860), False, 'import os\n'), ((37865, 37911), 'shutil.copy', 'shutil.copy', (['database_path', 'database_copy_path'], {}), '(database_path, database_copy_path)\n', (37876, 37911), False, 'import shutil\n'), ((37923, 37958), 'sqlite3.connect', 'sqlite3.connect', (['database_copy_path'], {}), '(database_copy_path)\n', (37938, 37958), False, 'import sqlite3\n'), ((3300, 3331), 'os.access', 'os.access', (['cookie_file', 'os.R_OK'], {}), '(cookie_file, os.R_OK)\n', (3309, 3331), False, 'import os\n'), ((4809, 4853), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""yt_dlp"""'}), "(prefix='yt_dlp')\n", (4836, 4853), False, 'import tempfile\n'), ((6187, 6227), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.mozilla/firefox"""'], {}), "('~/.mozilla/firefox')\n", (6205, 6227), False, 'import os\n'), ((10520, 10564), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""yt_dlp"""'}), "(prefix='yt_dlp')\n", (10547, 10564), False, 'import tempfile\n'), ((19798, 19826), 'os.path.isfile', 'os.path.isfile', (['cookies_path'], {}), '(cookies_path)\n', (19812, 19826), False, 'import os\n'), ((19908, 20020), 'os.path.expanduser', 'os.path.expanduser', 
(['"""~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies"""'], {}), "(\n '~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies'\n )\n", (19926, 20020), False, 'import os\n'), ((29963, 29992), 'shutil.which', 'shutil.which', (['"""kwallet-query"""'], {}), "('kwallet-query')\n", (29975, 29992), False, 'import shutil\n'), ((32509, 32550), 'secretstorage.get_default_collection', 'secretstorage.get_default_collection', (['con'], {}), '(con)\n', (32545, 32550), False, 'import secretstorage\n'), ((34885, 34897), 'json.load', 'json.load', (['f'], {}), '(f)\n', (34894, 34897), False, 'import json\n'), ((36872, 36893), 'ctypes.sizeof', 'ctypes.sizeof', (['buffer'], {}), '(buffer)\n', (36885, 36893), False, 'import ctypes\n'), ((36990, 37011), 'ctypes.byref', 'ctypes.byref', (['blob_in'], {}), '(blob_in)\n', (37002, 37011), False, 'import ctypes\n'), ((37276, 37298), 'ctypes.byref', 'ctypes.byref', (['blob_out'], {}), '(blob_out)\n', (37288, 37298), False, 'import ctypes\n'), ((37628, 37659), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.config"""'], {}), "('~/.config')\n", (37646, 37659), False, 'import os\n'), ((38419, 38432), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (38426, 38432), False, 'import os\n'), ((39460, 39487), 'os.path.expanduser', 'os.path.expanduser', (['profile'], {}), '(profile)\n', (39478, 39487), False, 'import os\n'), ((6277, 6336), 'os.path.expandvars', 'os.path.expandvars', (['"""%APPDATA%\\\\Mozilla\\\\Firefox\\\\Profiles"""'], {}), "('%APPDATA%\\\\Mozilla\\\\Firefox\\\\Profiles')\n", (6295, 6336), False, 'import os\n'), ((7205, 7241), 'os.path.expandvars', 'os.path.expandvars', (['"""%LOCALAPPDATA%"""'], {}), "('%LOCALAPPDATA%')\n", (7223, 7241), False, 'import os\n'), ((7268, 7299), 'os.path.expandvars', 'os.path.expandvars', (['"""%APPDATA%"""'], {}), "('%APPDATA%')\n", (7286, 7299), False, 'import os\n'), ((20026, 20054), 'os.path.isfile', 'os.path.isfile', (['cookies_path'], {}), '(cookies_path)\n', (20040, 20054), False, 'import os\n'), ((32460, 32485), 'secretstorage.dbus_init', 'secretstorage.dbus_init', ([], {}), '()\n', (32483, 32485), False, 'import secretstorage\n'), ((6385, 6444), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Library/Application Support/Firefox"""'], {}), "('~/Library/Application Support/Firefox')\n", (6403, 6444), False, 'import os\n'), ((6782, 6833), 'os.path.join', 'os.path.join', (['config', '"""BraveSoftware/Brave-Browser"""'], {}), "(config, 'BraveSoftware/Brave-Browser')\n", (6794, 6833), False, 'import os\n'), ((6857, 6894), 'os.path.join', 'os.path.join', (['config', '"""google-chrome"""'], {}), "(config, 'google-chrome')\n", (6869, 6894), False, 'import os\n'), ((6920, 6952), 'os.path.join', 'os.path.join', (['config', '"""chromium"""'], {}), "(config, 'chromium')\n", (6932, 6952), False, 'import os\n'), ((6974, 7012), 'os.path.join', 'os.path.join', (['config', '"""microsoft-edge"""'], {}), "(config, 'microsoft-edge')\n", (6986, 7012), False, 'import os\n'), ((7035, 7064), 'os.path.join', 'os.path.join', (['config', '"""opera"""'], {}), "(config, 'opera')\n", (7047, 7064), False, 'import os\n'), ((7089, 7120), 'os.path.join', 'os.path.join', (['config', '"""vivaldi"""'], {}), "(config, 'vivaldi')\n", (7101, 7120), False, 'import os\n'), ((7885, 7936), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Library/Application Support"""'], {}), "('~/Library/Application Support')\n", (7903, 7936), False, 'import os\n'), ((9778, 9802), 'os.path.dirname', 
'os.path.dirname', (['profile'], {}), '(profile)\n', (9793, 9802), False, 'import os\n'), ((9923, 9967), 'os.path.join', 'os.path.join', (["config['browser_dir']", 'profile'], {}), "(config['browser_dir'], profile)\n", (9935, 9967), False, 'import os\n'), ((36762, 36791), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char'], {}), '(ctypes.c_char)\n', (36776, 36791), False, 'import ctypes\n'), ((7345, 7415), 'os.path.join', 'os.path.join', (['appdata_local', '"""BraveSoftware\\\\Brave-Browser\\\\User Data"""'], {}), "(appdata_local, 'BraveSoftware\\\\Brave-Browser\\\\User Data')\n", (7357, 7415), False, 'import os\n'), ((7438, 7494), 'os.path.join', 'os.path.join', (['appdata_local', '"""Google\\\\Chrome\\\\User Data"""'], {}), "(appdata_local, 'Google\\\\Chrome\\\\User Data')\n", (7450, 7494), False, 'import os\n'), ((7519, 7569), 'os.path.join', 'os.path.join', (['appdata_local', '"""Chromium\\\\User Data"""'], {}), "(appdata_local, 'Chromium\\\\User Data')\n", (7531, 7569), False, 'import os\n'), ((7591, 7648), 'os.path.join', 'os.path.join', (['appdata_local', '"""Microsoft\\\\Edge\\\\User Data"""'], {}), "(appdata_local, 'Microsoft\\\\Edge\\\\User Data')\n", (7603, 7648), False, 'import os\n'), ((7670, 7731), 'os.path.join', 'os.path.join', (['appdata_roaming', '"""Opera Software\\\\Opera Stable"""'], {}), "(appdata_roaming, 'Opera Software\\\\Opera Stable')\n", (7682, 7731), False, 'import os\n'), ((7756, 7805), 'os.path.join', 'os.path.join', (['appdata_local', '"""Vivaldi\\\\User Data"""'], {}), "(appdata_local, 'Vivaldi\\\\User Data')\n", (7768, 7805), False, 'import os\n'), ((22226, 22273), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(0)', '(0)'], {'tzinfo': 'timezone.utc'}), '(2001, 1, 1, 0, 0, tzinfo=timezone.utc)\n', (22234, 22273), False, 'from datetime import datetime, timedelta, timezone\n'), ((22276, 22304), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'timestamp'}), '(seconds=timestamp)\n', (22285, 22304), False, 'from datetime import datetime, timedelta, timezone\n'), ((7982, 8034), 'os.path.join', 'os.path.join', (['appdata', '"""BraveSoftware/Brave-Browser"""'], {}), "(appdata, 'BraveSoftware/Brave-Browser')\n", (7994, 8034), False, 'import os\n'), ((8058, 8096), 'os.path.join', 'os.path.join', (['appdata', '"""Google/Chrome"""'], {}), "(appdata, 'Google/Chrome')\n", (8070, 8096), False, 'import os\n'), ((8122, 8155), 'os.path.join', 'os.path.join', (['appdata', '"""Chromium"""'], {}), "(appdata, 'Chromium')\n", (8134, 8155), False, 'import os\n'), ((8177, 8216), 'os.path.join', 'os.path.join', (['appdata', '"""Microsoft Edge"""'], {}), "(appdata, 'Microsoft Edge')\n", (8189, 8216), False, 'import os\n'), ((8239, 8287), 'os.path.join', 'os.path.join', (['appdata', '"""com.operasoftware.Opera"""'], {}), "(appdata, 'com.operasoftware.Opera')\n", (8251, 8287), False, 'import os\n'), ((8312, 8344), 'os.path.join', 'os.path.join', (['appdata', '"""Vivaldi"""'], {}), "(appdata, 'Vivaldi')\n", (8324, 8344), False, 'import os\n'), ((38648, 38677), 'os.path.join', 'os.path.join', (['curr_root', 'file'], {}), '(curr_root, file)\n', (38660, 38677), False, 'import os\n'), ((38741, 38755), 'os.lstat', 'os.lstat', (['path'], {}), '(path)\n', (38749, 38755), False, 'import os\n')] |
import logging
from collections import Counter
from django.core.management.base import BaseCommand
from django.db.models import Q
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
from TWLight.applications.signals import Reminder
from TWLight.users.models import Editor
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
        # This is not DRY. Originally, this pulled the queryset from
        # TWLight.applications.views.ListApplicationsView.get_queryset(),
        # but that now expects a request object, so the logic was copied here.
        # We fetch apps with a status of PENDING, QUESTION or APPROVED whose
        # coordinators have the corresponding reminder preference enabled,
        # limited to partners with a status of AVAILABLE.
all_apps = (
Application.objects.filter(
Q(
partner__coordinator__editor__user__userprofile__pending_app_reminders=True
)
& Q(status=Application.PENDING)
| Q(
partner__coordinator__editor__user__userprofile__discussion_app_reminders=True
)
& Q(status=Application.QUESTION)
| Q(
partner__coordinator__editor__user__userprofile__approved_app_reminders=True
)
& Q(status=Application.APPROVED),
partner__status__in=[Partner.AVAILABLE],
editor__isnull=False,
)
.exclude(editor__user__groups__name="restricted")
.order_by("status", "partner", "date_created")
)
# A deduplicated dict of coordinators from the pending app queryset, along
# with a count of how many total pending apps they have
coordinators = Counter(
all_apps.values_list(
"partner__coordinator__editor",
"partner__coordinator__email",
"partner__coordinator__editor__user__userprofile__lang",
)
)
for coordinator, count in list(coordinators.items()):
try:
# We create a dictionary with the three status codes
# we'd want to send emails for, and their corresponding
# counts.
app_status_and_count = {
Application.PENDING: all_apps.filter(
status=Application.PENDING,
partner__coordinator__editor=coordinator[0],
).count(),
Application.QUESTION: all_apps.filter(
status=Application.QUESTION,
partner__coordinator__editor=coordinator[0],
).count(),
Application.APPROVED: all_apps.filter(
status=Application.APPROVED,
partner__coordinator__editor=coordinator[0],
).count(),
}
editor = Editor.objects.get(id=coordinator[0])
except Editor.DoesNotExist:
logger.info(
"Editor {} does not exist; skipping.".format(coordinator[0])
)
                continue
# Only bother with the signal if we have a coordinator email.
if coordinator[1]:
Reminder.coordinator_reminder.send(
sender=self.__class__,
app_status_and_count=app_status_and_count,
coordinator_wp_username=editor.wp_username,
coordinator_email=coordinator[1],
coordinator_lang=coordinator[2],
)
| [
"logging.getLogger",
"django.db.models.Q",
"TWLight.users.models.Editor.objects.get",
"TWLight.applications.signals.Reminder.coordinator_reminder.send"
] | [((328, 355), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (345, 355), False, 'import logging\n'), ((3069, 3106), 'TWLight.users.models.Editor.objects.get', 'Editor.objects.get', ([], {'id': 'coordinator[0]'}), '(id=coordinator[0])\n', (3087, 3106), False, 'from TWLight.users.models import Editor\n'), ((3418, 3643), 'TWLight.applications.signals.Reminder.coordinator_reminder.send', 'Reminder.coordinator_reminder.send', ([], {'sender': 'self.__class__', 'app_status_and_count': 'app_status_and_count', 'coordinator_wp_username': 'editor.wp_username', 'coordinator_email': 'coordinator[1]', 'coordinator_lang': 'coordinator[2]'}), '(sender=self.__class__,\n app_status_and_count=app_status_and_count, coordinator_wp_username=\n editor.wp_username, coordinator_email=coordinator[1], coordinator_lang=\n coordinator[2])\n', (3452, 3643), False, 'from TWLight.applications.signals import Reminder\n'), ((1291, 1370), 'django.db.models.Q', 'Q', ([], {'partner__coordinator__editor__user__userprofile__approved_app_reminders': '(True)'}), '(partner__coordinator__editor__user__userprofile__approved_app_reminders=True)\n', (1292, 1370), False, 'from django.db.models import Q\n'), ((1427, 1457), 'django.db.models.Q', 'Q', ([], {'status': 'Application.APPROVED'}), '(status=Application.APPROVED)\n', (1428, 1457), False, 'from django.db.models import Q\n'), ((921, 999), 'django.db.models.Q', 'Q', ([], {'partner__coordinator__editor__user__userprofile__pending_app_reminders': '(True)'}), '(partner__coordinator__editor__user__userprofile__pending_app_reminders=True)\n', (922, 999), False, 'from django.db.models import Q\n'), ((1056, 1085), 'django.db.models.Q', 'Q', ([], {'status': 'Application.PENDING'}), '(status=Application.PENDING)\n', (1057, 1085), False, 'from django.db.models import Q\n'), ((1104, 1190), 'django.db.models.Q', 'Q', ([], {'partner__coordinator__editor__user__userprofile__discussion_app_reminders': '(True)'}), '(partner__coordinator__editor__user__userprofile__discussion_app_reminders\n =True)\n', (1105, 1190), False, 'from django.db.models import Q\n'), ((1242, 1272), 'django.db.models.Q', 'Q', ([], {'status': 'Application.QUESTION'}), '(status=Application.QUESTION)\n', (1243, 1272), False, 'from django.db.models import Q\n')] |
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum
def random_spherecial_vector():
# This method of calculating isotropic vectors is taken from GNU Scientific Library
LOOP = True
while LOOP:
x = -1. + 2. * np.random.uniform()
y = -1. + 2. * np.random.uniform()
s = x**2 + y**2
if s <= 1.0:
LOOP = False
z = -1. + 2. * s
a = 2 * np.sqrt(1 - s)
x = a * x
y = a * y
return np.array([x,y,z])
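# Editor's note: the rejection sampling above is the Marsaglia method used by the
# GNU Scientific Library; the result is always a unit vector, e.g.
#   np.isclose(np.linalg.norm(random_spherecial_vector()), 1.0)  # -> True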
class SimpleSource(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False):
super(SimpleSource, self).__init__()
self.position = position
self.direction = direction
self.wavelength = wavelength
self.use_random_polarisation = use_random_polarisation
self.throw = 0
self.source_id = "SimpleSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
# If use_polarisation is set generate a random polarisation vector of the photon
if self.use_random_polarisation:
            # Pick a random direction in the xy-plane, then rotate it using the alignment transform between +z and the photon direction
vec = random_spherecial_vector()
vec[2] = 0.
vec = norm(vec)
R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1])
photon.polarisation = transform_direction(vec, R)
else:
photon.polarisation = None
photon.id = self.throw
self.throw = self.throw + 1
return photon
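# Illustrative usage (editor's addition): every source in this module exposes a
# photon() factory that returns successive Photon objects with incrementing ids, e.g.
#   source = SimpleSource(position=[0, 0, 0], direction=[0, 0, 1], wavelength=600)
#   photons = [source.photon() for _ in range(100)]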
class Laser(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None):
super(Laser, self).__init__()
self.position = np.array(position)
self.direction = np.array(direction)
self.wavelength = wavelength
assert polarisation != None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation)
self.throw = 0
self.source_id = "LaserSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
photon.polarisation = self.polarisation
photon.id = self.throw
self.throw = self.throw + 1
return photon
class PlanarSource(object):
"""A box that emits photons from the top surface (normal), sampled from the spectrum."""
def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05):
super(PlanarSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.plane = FinitePlane(length=length, width=width)
self.length = length
self.width = width
# direction is the direction that photons are fired out of the plane in the GLOBAL FRAME.
        # i.e. this is passed directly to the photon to set its direction
self.direction = direction
self.throw = 0
self.source_id = "PlanarSource_" + str(id(self))
def translate(self, translation):
self.plane.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.plane.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
        # Create a point which is on the surface of the finite plane in its local frame
x = np.random.uniform(0., self.length)
y = np.random.uniform(0., self.width)
local_point = (x, y, 0.)
        # Transform the local point from the plane's local frame to the global frame
photon.position = transform_point(local_point, self.plane.transform)
photon.direction = self.direction
photon.active = True
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
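# Illustrative usage (editor's addition): a PlanarSource is positioned by composing
# transforms on its underlying plane before photons are generated, e.g.
#   source = PlanarSource(wavelength=555, length=0.05, width=0.05)
#   source.translate((0, 0, 0.01))
#   source.rotate(np.pi, (1, 0, 0))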
class LensSource(object):
"""
A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.throw = 0
self.source_id = "LensSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
z = np.random.uniform(self.planeorigin[2],self.planeextent[2])
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2]
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class LensSourceAngle(object):
"""
A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    For this lens an additional z-boost is added (angle of incidence in the z-direction).
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSourceAngle, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.angle = angle
self.throw = 0
self.source_id = "LensSourceAngle_" + str(id(self))
def photon(self):
photon = Photon()
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
boost = y*np.tan(self.angle)
z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) - boost
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2] + boost
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class CylindricalSource(object):
"""
A source for photons emitted in a random direction and position inside a cylinder(radius, length)
"""
def __init__(self, spectrum = None, wavelength = 555, radius = 1, length = 10):
super(CylindricalSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.shape = Cylinder(radius = radius, length = length)
self.radius = radius
self.length = length
self.throw = 0
self.source_id = "CylindricalSource_" + str(id(self))
def translate(self, translation):
self.shape.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.shape.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position of emission
phi = np.random.uniform(0., 2*np.pi)
r = np.random.uniform(0.,self.radius)
x = r*np.cos(phi)
y = r*np.sin(phi)
z = np.random.uniform(0.,self.length)
local_center = (x,y,z)
photon.position = transform_point(local_center, self.shape.transform)
# Direction of emission (no need to transform if meant to be isotropic)
phi = np.random.uniform(0.,2*np.pi)
theta = np.random.uniform(0.,np.pi)
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
local_direction = (x,y,z)
photon.direction = local_direction
# Set wavelength of photon
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
# Further initialisation
photon.active = True
return photon
class PointSource(object):
"""
A point source that emits randomly in solid angle specified by phimin, ..., thetamax
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi):
super(PointSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.throw = 0
self.source_id = "PointSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
phi = np.random.uniform(self.phimin, self.phimax)
theta = np.random.uniform(self.thetamin, self.thetamax)
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
direction = (x,y,z)
transform = tf.translation_matrix((0,0,0))
point = transform_point(self.center, transform)
photon.direction = direction
photon.position = point
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
photon.active = True
return photon
class RadialSource(object):
"""
A point source that emits at discrete angles theta(i) and phi(i)
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi, spacing=20):
super(RadialSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.spacing = spacing
self.throw = 0
self.source_id = "RadialSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
intphi = np.random.randint(1, self.spacing+1)
inttheta = np.random.randint(1, self.spacing+1)
phi = intphi*(self.phimax-self.phimin)/self.spacing
if self.thetamin == self.thetamax:
theta = self.thetamin
else:
theta = inttheta*(self.thetamax-self.thetamin)/self.spacing
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
direction = (x,y,z)
transform = tf.translation_matrix((0,0,0))
point = transform_point(self.center, transform)
photon.direction = direction
photon.position = point
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
photon.active = True
return photon
| [
"Geometry.Cylinder",
"external.transformations.translation_matrix",
"numpy.sqrt",
"numpy.tan",
"Geometry.FinitePlane",
"numpy.sin",
"Geometry.transform_direction",
"numpy.array",
"numpy.random.randint",
"Geometry.transform_point",
"external.transformations.rotation_matrix",
"numpy.cos",
"numpy.random.uniform",
"Geometry.rotation_matrix_from_vector_alignment",
"Trace.Photon",
"Geometry.norm"
] | [((1353, 1372), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1361, 1372), True, 'import numpy as np\n'), ((1299, 1313), 'numpy.sqrt', 'np.sqrt', (['(1 - s)'], {}), '(1 - s)\n', (1306, 1313), True, 'import numpy as np\n'), ((1971, 1979), 'Trace.Photon', 'Photon', ([], {}), '()\n', (1977, 1979), False, 'from Trace import Photon\n'), ((2045, 2068), 'numpy.array', 'np.array', (['self.position'], {}), '(self.position)\n', (2053, 2068), True, 'import numpy as np\n'), ((2096, 2120), 'numpy.array', 'np.array', (['self.direction'], {}), '(self.direction)\n', (2104, 2120), True, 'import numpy as np\n'), ((3139, 3157), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (3147, 3157), True, 'import numpy as np\n'), ((3183, 3202), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (3191, 3202), True, 'import numpy as np\n'), ((3345, 3367), 'numpy.array', 'np.array', (['polarisation'], {}), '(polarisation)\n', (3353, 3367), True, 'import numpy as np\n'), ((3513, 3521), 'Trace.Photon', 'Photon', ([], {}), '()\n', (3519, 3521), False, 'from Trace import Photon\n'), ((3587, 3610), 'numpy.array', 'np.array', (['self.position'], {}), '(self.position)\n', (3595, 3610), True, 'import numpy as np\n'), ((3638, 3662), 'numpy.array', 'np.array', (['self.direction'], {}), '(self.direction)\n', (3646, 3662), True, 'import numpy as np\n'), ((4239, 4278), 'Geometry.FinitePlane', 'FinitePlane', ([], {'length': 'length', 'width': 'width'}), '(length=length, width=width)\n', (4250, 4278), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((4891, 4899), 'Trace.Photon', 'Photon', ([], {}), '()\n', (4897, 4899), False, 'from Trace import Photon\n'), ((5108, 5143), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.length'], {}), '(0.0, self.length)\n', (5125, 5143), True, 'import numpy as np\n'), ((5155, 5189), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.width'], {}), '(0.0, self.width)\n', (5172, 5189), True, 'import numpy as np\n'), ((5291, 5341), 'Geometry.transform_point', 'transform_point', (['local_point', 'self.plane.transform'], {}), '(local_point, self.plane.transform)\n', (5306, 5341), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((6264, 6283), 'numpy.array', 'np.array', (['linepoint'], {}), '(linepoint)\n', (6272, 6283), True, 'import numpy as np\n'), ((6313, 6336), 'numpy.array', 'np.array', (['linedirection'], {}), '(linedirection)\n', (6321, 6336), True, 'import numpy as np\n'), ((6490, 6498), 'Trace.Photon', 'Photon', ([], {}), '()\n', (6496, 6498), False, 'from Trace import Photon\n'), ((6646, 6705), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[0]', 'self.planeextent[0]'], {}), '(self.planeorigin[0], self.planeextent[0])\n', (6663, 6705), True, 'import numpy as np\n'), ((6717, 6776), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[1]', 'self.planeextent[1]'], {}), '(self.planeorigin[1], self.planeextent[1])\n', (6734, 6776), True, 'import numpy as np\n'), ((6788, 6847), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[2]', 'self.planeextent[2]'], {}), '(self.planeorigin[2], self.planeextent[2])\n', (6805, 6847), True, 'import numpy as np\n'), ((6873, 6892), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (6881, 6892), True, 'import numpy as np\n'), ((6949, 6974), 
'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (6957, 6974), True, 'import numpy as np\n'), ((8392, 8411), 'numpy.array', 'np.array', (['linepoint'], {}), '(linepoint)\n', (8400, 8411), True, 'import numpy as np\n'), ((8441, 8464), 'numpy.array', 'np.array', (['linedirection'], {}), '(linedirection)\n', (8449, 8464), True, 'import numpy as np\n'), ((8658, 8666), 'Trace.Photon', 'Photon', ([], {}), '()\n', (8664, 8666), False, 'from Trace import Photon\n'), ((8784, 8843), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[0]', 'self.planeextent[0]'], {}), '(self.planeorigin[0], self.planeextent[0])\n', (8801, 8843), True, 'import numpy as np\n'), ((8855, 8914), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[1]', 'self.planeextent[1]'], {}), '(self.planeorigin[1], self.planeextent[1])\n', (8872, 8914), True, 'import numpy as np\n'), ((9056, 9075), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (9064, 9075), True, 'import numpy as np\n'), ((9132, 9157), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (9140, 9157), True, 'import numpy as np\n'), ((10215, 10253), 'Geometry.Cylinder', 'Cylinder', ([], {'radius': 'radius', 'length': 'length'}), '(radius=radius, length=length)\n', (10223, 10253), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((10673, 10681), 'Trace.Photon', 'Photon', ([], {}), '()\n', (10679, 10681), False, 'from Trace import Photon\n'), ((10843, 10876), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2 * np.pi)'], {}), '(0.0, 2 * np.pi)\n', (10860, 10876), True, 'import numpy as np\n'), ((10886, 10921), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.radius'], {}), '(0.0, self.radius)\n', (10903, 10921), True, 'import numpy as np\n'), ((11002, 11037), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.length'], {}), '(0.0, self.length)\n', (11019, 11037), True, 'import numpy as np\n'), ((11102, 11153), 'Geometry.transform_point', 'transform_point', (['local_center', 'self.shape.transform'], {}), '(local_center, self.shape.transform)\n', (11117, 11153), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((11266, 11299), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2 * np.pi)'], {}), '(0.0, 2 * np.pi)\n', (11283, 11299), True, 'import numpy as np\n'), ((11312, 11341), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'np.pi'], {}), '(0.0, np.pi)\n', (11329, 11341), True, 'import numpy as np\n'), ((11437, 11450), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (11443, 11450), True, 'import numpy as np\n'), ((12545, 12553), 'Trace.Photon', 'Photon', ([], {}), '()\n', (12551, 12553), False, 'from Trace import Photon\n'), ((12683, 12726), 'numpy.random.uniform', 'np.random.uniform', (['self.phimin', 'self.phimax'], {}), '(self.phimin, self.phimax)\n', (12700, 12726), True, 'import numpy as np\n'), ((12743, 12790), 'numpy.random.uniform', 'np.random.uniform', (['self.thetamin', 'self.thetamax'], {}), '(self.thetamin, self.thetamax)\n', (12760, 12790), True, 'import numpy as np\n'), ((12888, 12901), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (12894, 12901), True, 'import numpy as np\n'), ((12959, 12991), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (12980, 
12991), True, 'import external.transformations as tf\n'), ((13006, 13045), 'Geometry.transform_point', 'transform_point', (['self.center', 'transform'], {}), '(self.center, transform)\n', (13021, 13045), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((14082, 14090), 'Trace.Photon', 'Photon', ([], {}), '()\n', (14088, 14090), False, 'from Trace import Photon\n'), ((14232, 14270), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.spacing + 1)'], {}), '(1, self.spacing + 1)\n', (14249, 14270), True, 'import numpy as np\n'), ((14296, 14334), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.spacing + 1)'], {}), '(1, self.spacing + 1)\n', (14313, 14334), True, 'import numpy as np\n'), ((14669, 14682), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (14675, 14682), True, 'import numpy as np\n'), ((14740, 14772), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (14761, 14772), True, 'import external.transformations as tf\n'), ((14787, 14826), 'Geometry.transform_point', 'transform_point', (['self.center', 'transform'], {}), '(self.center, transform)\n', (14802, 14826), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((2542, 2551), 'Geometry.norm', 'norm', (['vec'], {}), '(vec)\n', (2546, 2551), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((2568, 2632), 'Geometry.rotation_matrix_from_vector_alignment', 'rotation_matrix_from_vector_alignment', (['self.direction', '[0, 0, 1]'], {}), '(self.direction, [0, 0, 1])\n', (2605, 2632), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((2665, 2692), 'Geometry.transform_direction', 'transform_direction', (['vec', 'R'], {}), '(vec, R)\n', (2684, 2692), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((4702, 4736), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['translation'], {}), '(translation)\n', (4723, 4736), True, 'import external.transformations as tf\n'), ((4814, 4845), 'external.transformations.rotation_matrix', 'tf.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (4832, 4845), True, 'import external.transformations as tf\n'), ((7022, 7072), 'numpy.random.uniform', 'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (7039, 7072), True, 'import numpy as np\n'), ((7116, 7166), 'numpy.random.uniform', 'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (7133, 7166), True, 'import numpy as np\n'), ((8932, 8950), 'numpy.tan', 'np.tan', (['self.angle'], {}), '(self.angle)\n', (8938, 8950), True, 'import numpy as np\n'), ((8963, 9022), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[2]', 'self.planeextent[2]'], {}), '(self.planeorigin[2], self.planeextent[2])\n', (8980, 9022), True, 'import numpy as np\n'), ((9205, 9255), 'numpy.random.uniform', 'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (9222, 9255), True, 'import numpy as np\n'), ((9299, 9349), 'numpy.random.uniform', 
'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (9316, 9349), True, 'import numpy as np\n'), ((10492, 10526), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['translation'], {}), '(translation)\n', (10513, 10526), True, 'import external.transformations as tf\n'), ((10600, 10631), 'external.transformations.rotation_matrix', 'tf.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (10618, 10631), True, 'import external.transformations as tf\n'), ((10951, 10962), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (10957, 10962), True, 'import numpy as np\n'), ((10977, 10988), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (10983, 10988), True, 'import numpy as np\n'), ((11361, 11372), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (11367, 11372), True, 'import numpy as np\n'), ((11373, 11386), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (11379, 11386), True, 'import numpy as np\n'), ((11399, 11410), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (11405, 11410), True, 'import numpy as np\n'), ((11411, 11424), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (11417, 11424), True, 'import numpy as np\n'), ((12812, 12823), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (12818, 12823), True, 'import numpy as np\n'), ((12824, 12837), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12830, 12837), True, 'import numpy as np\n'), ((12850, 12861), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (12856, 12861), True, 'import numpy as np\n'), ((12862, 12875), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12868, 12875), True, 'import numpy as np\n'), ((14586, 14597), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (14592, 14597), True, 'import numpy as np\n'), ((14598, 14611), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14604, 14611), True, 'import numpy as np\n'), ((14630, 14641), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14636, 14641), True, 'import numpy as np\n'), ((14642, 14655), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14648, 14655), True, 'import numpy as np\n'), ((1120, 1139), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1137, 1139), True, 'import numpy as np\n'), ((1163, 1182), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1180, 1182), True, 'import numpy as np\n'), ((5519, 5538), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5536, 5538), True, 'import numpy as np\n'), ((7522, 7541), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (7539, 7541), True, 'import numpy as np\n'), ((9712, 9731), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9729, 9731), True, 'import numpy as np\n'), ((11687, 11706), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (11704, 11706), True, 'import numpy as np\n'), ((13239, 13258), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (13256, 13258), True, 'import numpy as np\n'), ((15020, 15039), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (15037, 15039), True, 'import numpy as np\n')] |