Dataset schema (one row per source file):

| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
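Each record below follows this schema. A minimal access sketch (the `rows.jsonl` filename is an assumption for illustration; only the column names come from the schema above):

    import json

    # Each line holds one row serialized as a JSON object keyed by the columns above.
    with open("rows.jsonl") as f:
        for row in map(json.loads, f):
            print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])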

hexsha: 0684a59b5485fb53c2b1588876b4d3d204cb4623 | size: 4,046 | ext: py | lang: Python
path: braids/resources/characters.py | repo: oscillating-gate/eurorack | head_hexsha: 35bf03aa35b01a7a4a9b0a0ca2898677cd3a9f6a | licenses: ["MIT"]
stars: 233 (2018-07-02T16:49:36.000Z to 2022-02-27T21:45:39.000Z) | issues: 24 (2018-07-09T11:32:15.000Z to 2022-01-07T01:45:43.000Z) | forks: 24 (2018-07-14T21:55:30.000Z to 2021-05-04T04:20:34.000Z)

# Copyright 2012 Olivier Gillet.
#
# Author: Olivier Gillet ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Characters definitions.
characters = []
MASKS = {
'a': 0x8000,
'b': 0x4000,
'c': 0x2000,
'd': 0x1000,
'e': 0x0800,
'f': 0x0400,
'g': 0x0200,
'h': 0x0100,
'j': 0x80,
'k': 0x40,
'l': 0x20,
'm': 0x10,
'n': 0x08,
'p': 0x04,
}
characters = {
'A': 'afepkbc',
'B': 'adhmbck',
'C': 'afed',
'D': 'adhmbc',
'E': 'afedkp',
'F': 'afepk',
'G': 'afedck',
'H': 'febcpk',
'I': 'adhm',
'J': 'bcde',
'K': 'efpjl',
'L': 'def',
'M': 'efgjbc',
'N': 'efglcb',
'O': 'abcdef',
'P': 'abpkef',
'Q': 'abcdefl',
'R': 'abpkefl',
'S': 'afpkcd',
'T': 'ahm',
'U': 'bcdef',
'V': 'fenj',
'W': 'fenlcb',
'X': 'gjln',
'Y': 'gjm',
'Z': 'ajnd',
'a': 'abpkecd',
'b': 'fedlp',
'c': 'pked',
'd': 'bcdnk',
'e': 'afped',
'f': 'afpe',
'g': 'agkbcd',
'h': 'fpkec',
'i': 'mpkd',
'j': 'kcd',
'k': 'hmjl',
'l': 'hm',
'm': 'epkmc',
'n': 'mkc',
'o': 'pkecd',
'p': 'afpje',
'q': 'afpkbl',
'r': 'mk',
's': 'agkcd',
't': 'fedp',
'u': 'edc',
'v': 'en',
'w': 'enlc',
'x': 'gnjl',
'y': 'gkbcd',
'z': 'pnd',
'0': 'abcdefnj',
'1': 'jbc',
'2': 'abpked',
'3': 'abcdk',
'4': 'fpkbc',
'5': 'afpkcd',
'6': 'afpkcde',
'7': 'ajm',
'8': 'abcdefpk',
'9': 'abcpkfd',
'!': 'hm',
'"': 'fh',
'#': 'pkdhmbc',
'$': 'afpkcdhm',
'%': 'jnfc',
'&': 'aghpeld',
'\'': 'h',
'(': 'afed',
')': 'abcd',
'*': 'ghjmnl',
'+': 'hmpk',
',': 'n',
'-': 'pk',
'.': 'm',
'/': 'jn',
':': 'hm',
';': 'hn',
'<': 'jl',
'>': 'gn',
'?': 'fajm',
'=': 'pkd',
'@': 'kmcbafed',
'[': 'afed',
']': 'abcd',
'\\': 'gl',
'^': 'nl',
'_': 'd',
'`': 'g',
'{': 'pgnad',
'|': 'hm',
'}': 'ajldk',
'~': 'pk',
# LRDU
'\x80': 'jlbc',
'\x81': 'efgn',
'\x82': 'agj',
'\x83': 'dnl',
# LRDU arrow
'\x84': 'jkl',
'\x85': 'gpn',
'\x86': 'ghj',
'\x87': 'nml',
# Waveforms
'\x88': 'njbc', # Saw
'\x89': 'enjb', # Centered saw
'\x8A': 'mn', # Baby saw
'\x8B': 'nl', # Tri
'\x8C': 'efabc', # Square
'\x8D': 'epkc', # Baby square
'\x8E': 'dhm', # Pulse
'\x8F': 'efgl', # AD
# Spinner
'\x90': 'abcdefn',
'\x91': 'abcdefp',
'\x92': 'abcdefg',
'\x93': 'abcdefh',
'\x94': 'abcdefj',
'\x95': 'abcdefk',
'\x96': 'abcdefl',
'\x97': 'abcdefm',
# Spinner 2
'\x98': 'ab',
'\x99': 'abc',
'\x9A': 'bcd',
'\x9B': 'cde',
'\x9C': 'de',
'\x9D': 'def',
'\x9E': 'efa',
'\x9F': 'fab',
'\xA0': 'abcdefghjklmnp',
'\xFF': 'abcdefghjklmnp',
'null': 'null'
}
character_table = []
for i in xrange(256):
segments = characters.get(chr(i), '')
character_table.append(sum(MASKS[segment] for segment in segments))
characters = [('characters', character_table)]
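# --- Hypothetical usage sketch (not part of the original file) ---
# Each entry of character_table is a 16-bit mask whose set bits select the display
# segments named in MASKS above ('a'..'p', skipping 'i' and 'o'); characters not
# listed in the table map to 0 (blank).
def render_to_masks(text):
    # Works for 8-bit character codes, which is all the 256-entry table covers.
    return [character_table[ord(c)] for c in text]
# e.g. character_table[ord('O')] == sum(MASKS[s] for s in 'abcdef')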
avg_line_length: 20.129353 | max_line_length: 79 | alphanum_fraction: 0.505685

hexsha: 5dde8c9aec8fd941c02d63e3b877a5d9872e789f | size: 1,143 | ext: py | lang: Python
path: core/dense.py | repo: vishalmanes109/nn-from-scratch | head_hexsha: 53de76f39dfea3f625ec542536f0ab3bc44d0224 | licenses: ["MIT"]
stars: 2 (2020-10-09T05:50:14.000Z to 2021-04-10T08:52:03.000Z) | issues: null | forks: null

from .layer import Layer
import numpy as np
import config
class Dense(Layer):
# input_size = Number of Input Neurons
# output_size = Number of Output Neurons
def __init__(self, input_size, output_size):
self.weights = np.random.rand(input_size, output_size) - 0.5
self.bias = np.random.rand(1, output_size) - 0.5
self.vW = np.zeros([input_size, output_size])
self.vB = np.zeros([1, output_size])
def forward_propagation(self, input_data):
self.input = input_data
self.output = np.dot(self.input, self.weights) + self.bias
return self.output
def backward_propagation(self, output_error, optimizer_fn, learning_rate):
input_error = np.dot(output_error, self.weights.T)
dW = np.dot(self.input.T, output_error)
dB = output_error
w_updated, b_updated, vW_updated, vB_updated = optimizer_fn.minimize(
self.weights, self.bias, dW, dB, self.vW, self.vB, learning_rate
)
self.weights = w_updated
self.bias = b_updated
self.vW = vW_updated
self.vB = vB_updated
return input_error
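# --- Hypothetical usage sketch (not from the original file) ---
# Forward-pass shapes only; backward_propagation additionally needs an optimizer
# object exposing .minimize(weights, bias, dW, dB, vW, vB, learning_rate).
#
#   layer = Dense(input_size=3, output_size=2)
#   x = np.random.rand(1, 3)            # one sample, three features
#   y = layer.forward_propagation(x)    # -> array of shape (1, 2)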
avg_line_length: 34.636364 | max_line_length: 78 | alphanum_fraction: 0.657918

hexsha: 747f02f3688a9b2541bc42c21f75e326d7c645b5 | size: 978 | ext: py | lang: Python
path: bot.py | repo: RobinMcNally/DiscordBot | head_hexsha: 5fe03d23e72a7eeb0719f6cb1aa63c9c6c9c67c1 | licenses: ["MIT"]
stars: null | issues: null | forks: null

import json
import os.path
import sys
import urllib.request
from features.roll_backend import roll_backend
from features.asciify_backend import asciify_backend
from discord.ext.commands import Bot
generic_error = "```I'm just a poor robot. Stop trying to break me!```"
#Bot function definitions
bot = Bot(command_prefix="!")
@bot.event
async def on_ready():
print("Client logged in")
@bot.command()
async def asciify(*args):
return await bot.say("Asciify coming soon")
@bot.command()
async def roll(*args):
return await bot.say(roll_backend(args))
if __name__ == "__main__":
if not os.path.isfile('OAuth.json'):
print("Bot cannot execute without OAuth.json")
sys.exit()
with open('OAuth.json') as token_file:
token_data = json.load(token_file)
if 'OAuthToken' not in token_data:
print("Malformed OAuth.json")
sys.exit()
OAuthToken = token_data['OAuthToken']
bot.run(OAuthToken)
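# --- Example OAuth.json consumed by the startup block above ---
# (the token value is a placeholder, not a real credential)
# {
#     "OAuthToken": "YOUR-DISCORD-BOT-TOKEN"
# }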
avg_line_length: 24.45 | max_line_length: 71 | alphanum_fraction: 0.688139

hexsha: f55c513370946b425d988bc66b2b690b1a68248b | size: 4,113 | ext: py | lang: Python
path: datalad/support/json_py.py | repo: jelmer/datalad | head_hexsha: fedc04867d87e0191bd500991d0df97e97113457 | licenses: ["MIT"]
stars: null | issues: null | forks: null

# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Simple wrappers to get uniform JSON input and output
"""
import io
import codecs
from six import PY2
from os.path import dirname
from os.path import exists
from os import makedirs
import os
import os.path as op
# wrapped below
from simplejson import load as jsonload
from simplejson import dump as jsondump
# simply mirrored for now
from simplejson import loads as json_loads
from simplejson import JSONDecodeError
# produce relatively compact, but also diff-friendly format
json_dump_kwargs = dict(
indent=0,
separators=(',', ':\n'),
sort_keys=True,
ensure_ascii=False,
encoding='utf-8', )
# achieve minimal representation, but still deterministic
compressed_json_dump_kwargs = dict(
json_dump_kwargs,
indent=None,
separators=(',', ':'))
# Let's just reuse top level one for now
from ..log import lgr
from ..dochelpers import exc_str
def dump(obj, fname):
indir = dirname(fname)
if not exists(indir):
makedirs(indir)
with io.open(fname, 'wb') as f:
return dump2fileobj(obj, f)
def dump2fileobj(obj, fileobj):
return jsondump(
obj,
codecs.getwriter('utf-8')(fileobj),
**json_dump_kwargs)
def LZMAFile(*args, **kwargs):
"""A little decorator to overcome a bug in lzma
A unique to yoh and some others bug with pyliblzma
calling dir() helps to avoid AttributeError __exit__
see https://bugs.launchpad.net/pyliblzma/+bug/1219296
"""
from .lzma import lzma
lzmafile = lzma.LZMAFile(*args, **kwargs)
dir(lzmafile)
return lzmafile
def dump2stream(obj, fname, compressed=False):
_open = LZMAFile if compressed else open
indir = dirname(fname)
if op.lexists(fname):
os.remove(fname)
elif indir and not exists(indir):
makedirs(indir)
with _open(fname, mode='wb') as f:
jwriter = codecs.getwriter('utf-8')(f)
for o in obj:
jsondump(o, jwriter, **compressed_json_dump_kwargs)
f.write(b'\n')
def dump2xzstream(obj, fname):
dump2stream(obj, fname, compressed=True)
def load_stream(fname, compressed=False):
_open = LZMAFile if compressed else open
with _open(fname, mode='r') as f:
for line in f:
yield loads(line)
def load_xzstream(fname):
for o in load_stream(fname, compressed=True):
yield o
def loads(s, *args, **kwargs):
"""Helper to log actual value which failed to be parsed"""
try:
return json_loads(s, *args, **kwargs)
except:
lgr.error(
"Failed to load content from %r with args=%r kwargs=%r"
% (s, args, kwargs)
)
raise
def load(fname, fixup=True, **kw):
"""Load JSON from a file, possibly fixing it up if initial load attempt fails
Parameters
----------
fixup : bool
In case of failed load, apply a set of fixups with hope to resolve issues
in JSON
**kw
Passed into the load (and loads after fixups) function
"""
with io.open(fname, 'r', encoding='utf-8') as f:
try:
return jsonload(f, **kw)
except JSONDecodeError as exc:
if not fixup:
raise
lgr.warning("Failed to decode content in %s: %s. Trying few tricks", fname, exc_str(exc))
# Load entire content and replace common "abusers" which break JSON comprehension but in general
# are Ok
with io.open(fname, 'r', encoding='utf-8') as f:
s_orig = s = f.read()
for o, r in {
u"\xa0": " ", # non-breaking space
}.items():
s = s.replace(o, r)
if s == s_orig:
# we have done nothing, so just reraise previous exception
raise
return loads(s, **kw)
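# --- Hypothetical usage sketch (not part of the original module) ---
# Round-trip a sequence of records through the newline-delimited, xz-compressed
# stream helpers defined above (each record is written as a single JSON line):
#
#   records = [{'id': 1}, {'id': 2}]
#   dump2xzstream(records, '/tmp/records.json.xz')
#   assert list(load_xzstream('/tmp/records.json.xz')) == records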
avg_line_length: 26.197452 | max_line_length: 101 | alphanum_fraction: 0.616095

hexsha: b63b157f0103b6c6cd8cb84646fd73d9f1ce74ec | size: 37,398 | ext: py | lang: Python
path: test/functional/wallet_importmulti.py | licenses: ["MIT"]
stars: 1 (2022-01-09T22:29:10.000Z to 2022-01-09T22:29:10.000Z) | stars_repo: stamhe/bitcoin-abc @ a1ba303c6b4f164ae94612e83b824e564405a96e
issues: 17 (2021-08-06T21:27:41.000Z to 2022-03-31T08:28:08.000Z) | issues_repo: EGYVOICE/bitcoin-abc-avalanche @ e0f1fe857e1fc85f01903f1c323c2d5c54aecc1c
forks: null | forks_repo: EGYVOICE/bitcoin-abc-avalanche @ e0f1fe857e1fc85f01903f1c323c2d5c54aecc1c

#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.descriptors import descsum_create
from test_framework.script import OP_NOP, CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import get_key, get_multisig, test_address
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
def test_importmulti(self, req, success, error_code=None,
error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal(
"\n".join(
sorted(warnings)), "\n".join(
sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
node0_address1 = self.nodes[0].getaddressinfo(
self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info(
"Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Unsuccessful labelling for internal addresses"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info(
"Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info(
"Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info(
"Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info(
"Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info(
"Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(
self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
iswatchonly=True,
ismine=False,
solvable=True)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info(
"Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(
0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(
self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info(
"Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info(
"Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info(
"Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info(
"Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info(
"Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should
# replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of
# watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Test that importing of a P2PKH address via descriptor without
# checksum fails
key = get_key(self.nodes[0])
self.log.info(
"Should fail to import a p2pkh address from descriptor with no checksum")
self.test_importmulti({"desc": "pkh(" + key.pubkey + ")",
"timestamp": "now",
"label": "Descriptor import test"},
success=False,
error_code=-5,
error_message='Missing checksum')
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
# hdkeypath=m/0'/0'/0' and 1'
addresses = [
"ecregtest:prvn9ycvgr5atuyh49sua3mapskh2mnnzg7t9yp6dt",
"ecregtest:pp3n087yx0njv2e5wcvltahfxqst7l66rutz8ceeat"]
# pkh subscripts corresponding to the above addresses
addresses += [
"ecregtest:qqdkxd2xnzftq2p8wr3sqqyw8lntap7tncs546s6pr",
"ecregtest:qpyryy83jfaec5u0gpzldk6teadsuq8zlyqh5l30uq",
]
desc = "sh(pkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info(
"Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info(
"Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
for address in addresses:
test_address(self.nodes[1], address, solvable=True, ismine=True)
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
# Note: in Core's test, this address refers to the sh(wpkh()) address.
# For a sh(pkh()) this does not refer to a key, so we use the subscript
# address instead, which returns the same privkey.
address = "ecregtest:qzh6rch6st3wjvp0h2ud87gn7xnxvf6h8yrk8gcg8t"
desc = "sh(pkh(" + wif_priv + "))"
self.log.info(
"Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
p2pkh_label = "P2PKH descriptor import"
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": p2pkh_label},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info(
"Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info(
"Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info(
"Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info(
"Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc': descsum_create("pkh([" + pub_fpr + pub_keypath[1:] + "]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc': descsum_create("pkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(
wallet_name="noprivkeys",
disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress()
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey
# wallet
self.log.info(
"Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('pkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress()
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info(
            'Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('sh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info(
"Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(
result[0]['error']['message'],
"Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'ecregtest:qp0v86h53rc92hjrlpwzpjtdlgzsxu25svv6g40fpl', # m/0'/0'/0
'ecregtest:qqasy0zlkdleqt4pkn8fs4ehm5gnnz6qpgdcpt90fq', # m/0'/0'/1
'ecregtest:qp0sp4wlhctvprqvdt2dgvqcfdjssu04xgey0l3syw', # m/0'/0'/2
'ecregtest:qrhn24tegn04cptfv4ldhtkduxq55zcwrycjfdj9vr', # m/0'/0'/3
'ecregtest:qzpqhett2uwltq803vrxv7zkqhft5vsnmcjeh50v0p', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('pkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range': [0, 4],
}]
)
self.log.info(result)
for i in range(0, 5):
addr = wrpc.getnewaddress('')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
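# --- Illustrative shape of an importmulti request exercised by the tests above ---
# (all values are placeholders; see the test_importmulti() calls for real variants)
#
#   node.importmulti([{
#       "scriptPubKey": {"address": "<address>"},  # or a raw scriptPubKey, or "desc"
#       "timestamp": "now",                        # or a UNIX timestamp
#       "keys": ["<WIF private key>"],             # optional
#       "watchonly": False,                        # optional
#   }])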
avg_line_length: 47.459391 | max_line_length: 299 | alphanum_fraction: 0.552142

hexsha: 86b3348ca8f6adc57a37a549254eae6794e335dd | size: 8,665 | ext: py | lang: Python
path: src/models/udt/train.py | repo: alexmlamb/SPUDT | head_hexsha: 5d4ff32c9e37a485c176d3e68c58723e544972e5 | licenses: ["MIT"]
stars: null | issues: null | forks: null

import time
import os
import torch
import torch.nn.functional as F
from torch import optim
from common.util import sample, save_models
from evaluation.fid import calculate_fid
from common.initialize import initialize, infer_iteration
from . import model
def gp_loss(x, y, d, device):
batch_size = x.size()[0]
gp_alpha = torch.rand(batch_size, 1, 1, 1, device=device)
interp = gp_alpha * x.data + (1 - gp_alpha) * y.data
interp.requires_grad = True
d_interp = d(interp)
grad_interp = torch.autograd.grad(outputs=d_interp, inputs=interp,
grad_outputs=torch.ones(d_interp.size(), device=device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
diff = grad_interp.norm(2, dim=1) - 1
diff = torch.clamp(diff, 0)
return torch.mean(diff**2)
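# gp_loss above computes a WGAN-GP style gradient penalty on random interpolates
# x_hat = a*x + (1-a)*y. Because of the clamp, it is the one-sided variant
# E[max(0, ||grad D(x_hat)||_2 - 1)^2], which penalises only gradient norms above 1;
# compute_critic_loss below adds it to the critic objective with weight 10.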
def semantic_loss(data, nz, generator, classifier, device):
label = classifier(data).argmax(1)
z = torch.randn(data.shape[0], nz, device=device)
gen = generator(data, z)
pred = classifier(gen)
return F.cross_entropy(pred, label)
def cycle_loss(data, nz, generator1, generator2, device):
z = torch.randn(data.shape[0], nz, device=device)
gen = generator1(data, z)
z = torch.randn(data.shape[0], nz, device=device)
cycle = generator2(gen, z)
return F.l1_loss(data, cycle)
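# cycle_loss above is an L1 cycle-consistency term (x -> generator1 -> generator2
# should reconstruct x); identity_loss below is the analogous L1 identity term.
# Both are common regularisers in unpaired translation setups such as this one.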
def identity_loss(data, nz, generator, device):
z = torch.randn(data.shape[0], nz, device=device)
gen = generator(data, z)
return F.l1_loss(gen, data)
def compute_critic_loss(data, nz, target, critic, generator, device):
z = torch.randn(data.shape[0], nz, device=device)
gen = generator(data, z).detach()
pos_loss = critic(target).mean()
neg_loss = critic(gen).mean()
gp = gp_loss(gen, target, critic, device)
return pos_loss - neg_loss + 10*gp
def generator_loss(data, nz, critic, generator, device):
z = torch.randn(data.shape[0], nz, device=device)
gen = generator(data, z)
return critic(gen).mean()
def define_models(shape1, **parameters):
critic = model.Critic(shape1[0], **parameters)
generator = model.Generator(shape1[0], **parameters)
return {
'critic': critic,
'generator': generator,
}
@torch.no_grad()
def evaluate(loader, nz, transfer, classifier, device):
correct = 0
total = 0
for data, label in loader:
data, label = data.to(device), label.to(device)
z = torch.randn(data.shape[0], nz, device=device)
gen = transfer(data, z)
pred = F.softmax(classifier(gen), 1).argmax(1)
correct += (pred == label).sum().cpu().float()
total += len(pred)
accuracy = correct / total
accuracy = accuracy.cpu().numpy()
return accuracy
@torch.no_grad()
def plot_transfer(visualiser, data, target, nz, transfer, id, i, device):
z = torch.randn(data.shape[0], nz, device=device)
transfered = transfer(data, z)
merged = len(data)*2 * [None]
merged[:2*len(data):2] = data
merged[1:2*len(transfered):2] = transfered
merged = torch.stack(merged)
visualiser.image(merged.cpu().numpy(), f'Comparison{id}', i)
visualiser.image(target.cpu().numpy(), title=f'Target {id}', step=i)
def train(args):
parameters = vars(args)
train_loader1, valid_loader1, test_loader1 = args.loaders1
train_loader2, valid_loader2, test_loader2 = args.loaders2
models = define_models(**parameters)
initialize(models, args.reload, args.save_path, args.model_path)
critic = models['critic'].to(args.device)
generator = models['generator'].to(args.device)
evalY = args.evalY.to(args.device).eval()
semantic = args.semantic.to(args.device).eval()
print(generator)
print(critic)
print(semantic)
optim_critic = optim.Adam(critic.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optim_generator = optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
iter1 = iter(train_loader1)
iter2 = iter(train_loader2)
iteration = infer_iteration(list(models.keys())[0], args.reload, args.model_path, args.save_path)
t0 = time.time()
for i in range(iteration, args.iterations):
critic.train()
generator.train()
for _ in range(args.d_updates):
batchx, iter1 = sample(iter1, train_loader1)
datax = batchx[0].to(args.device)
batchy, iter2 = sample(iter2, train_loader2)
datay = batchy[0].to(args.device)
critic_lossy = compute_critic_loss(datax, args.z_dim, datay, critic, generator, args.device)
optim_critic.zero_grad()
critic_lossy.backward()
optim_critic.step()
batchx, iter1 = sample(iter1, train_loader1)
datax = batchx[0].to(args.device)
batchy, iter2 = sample(iter2, train_loader2)
datay = batchy[0].to(args.device)
glossxy = generator_loss(datax, args.z_dim, critic, generator, args.device)
slossxy = semantic_loss(datax, args.z_dim, generator, semantic, args.device)
optim_generator.zero_grad()
glossxy.backward()
(args.gsxy*slossxy).backward()
optim_generator.step()
if i % args.evaluate == 0:
print('Iter: %s' % i, time.time() - t0)
generator.eval()
save_path = args.save_path
plot_transfer(args.visualiser, datax, datay, args.z_dim, generator, 'x-y', i, args.device)
test_accuracy_xy = evaluate(test_loader1, args.z_dim, generator, evalY, args.device)
with open(os.path.join(save_path, 'glossxy'), 'a') as f: f.write(f'{i},{glossxy.cpu().item()}\n')
with open(os.path.join(save_path, 'slossxy'), 'a') as f: f.write(f'{i},{slossxy.cpu().item()}\n')
with open(os.path.join(save_path, 'test_accuracy_xy'), 'a') as f: f.write(f'{i},{test_accuracy_xy}\n')
args.visualiser.plot(critic_lossy.cpu().detach().numpy(), title='critic_lossy', step=i)
args.visualiser.plot(glossxy.cpu().detach().numpy(), title='glossxy', step=i)
args.visualiser.plot(slossxy.cpu().detach().numpy(), title='slossxy', step=i)
args.visualiser.plot(test_accuracy_xy, title=f'Test transfer accuracy X-Y', step=i)
t0 = time.time()
save_models(models, 0, args.model_path, args.checkpoint)
@torch.no_grad()
def evaluate_fid(args):
parameters = vars(args)
_, _, test_loader1 = args.loaders1
_, _, test_loader2 = args.loaders2
models = define_models(**parameters)
initialize(models, True, args.save_path, args.model_path)
generatorXY = models['generatorXY'].to(args.device)
generatorYX = models['generatorYX'].to(args.device)
datas1 = []
labels1 = []
gens1 = []
for i, (data, label) in enumerate(test_loader1):
data, label = data.to(args.device), label.to(args.device)
datas1 += [data]
labels1 += [label]
z = torch.randn(len(data), args.z_dim, device=args.device)
gen = generatorXY(data, z)
gens1 += [gen]
datas1 = torch.cat(datas1)
labels1 = torch.cat(labels1)
gens1 = torch.cat(gens1)
datas2 = []
labels2 = []
gens2 = []
for i, (data, label) in enumerate(test_loader2):
data, label = data.to(args.device), label.to(args.device)
datas2 += [data]
labels2 += [label]
z = torch.randn(len(data), args.z_dim, device=args.device)
gen = generatorYX(data, z)
gens2 += [gen]
datas2 = torch.cat(datas2)
labels2 = torch.cat(labels2)
gens2 = torch.cat(gens2)
#fid = calculate_fid(datas1[:1000], datas1[1000:2000], 50, args.device, 2048)
#print(f'fid datasetX: {fid}')
#fid = calculate_fid(datas2[:1000], datas2[1000:2000], 50, args.device, 2048)
#print(f'fid datasetY: {fid}')
fid = calculate_fid(datas1, gens2, 50, args.device, 2048)
save_path = args.save_path
with open(os.path.join(save_path, 'fid_yx'), 'w') as f: f.write(f'{fid}\n')
print(f'fid Y->X: {fid}')
fid = calculate_fid(datas2, gens1, 50, args.device, 2048)
with open(os.path.join(save_path, 'fid_xy'), 'w') as f: f.write(f'{fid}\n')
print(f'fid X->Y: {fid}')
#for i in range(10):
# l1 = labels1 == i
# l2 = labels2 == i
# d, g = datas1[l1], gens2[l2]
# fid = calculate_fid(d, g, 50, args.device, 2048)
# print(f'intra-fid label {i} Y->X: {fid}')
# d, g = datas2[l2], gens1[l1]
# fid = calculate_fid(d, g, 50, args.device, 2048)
# print(f'intra-fid label {i} X->Y: {fid}')
avg_line_length: 36.87234 | max_line_length: 114 | alphanum_fraction: 0.633353

hexsha: 9af5dc5f75cc9a70253e14625aab13e5e4dc1558 | size: 114,849 | ext: py | lang: Python
path: mypy/nodes.py | repo: aghast/mypy | head_hexsha: 13ae58ffe8bedb7da9f4c657297f0d61e681d671 | licenses: ["PSF-2.0"]
stars: null | issues: null | forks: null

"""Abstract syntax tree node classes (i.e. parse tree)."""
import os
from abc import abstractmethod
from mypy.ordered_dict import OrderedDict
from collections import defaultdict
from typing import (
Any, TypeVar, List, Tuple, cast, Set, Dict, Union, Optional, Callable, Sequence, Iterator
)
from typing_extensions import DefaultDict, Final, TYPE_CHECKING
from mypy_extensions import trait
import mypy.strconv
from mypy.util import short_type
from mypy.visitor import NodeVisitor, StatementVisitor, ExpressionVisitor
from mypy.bogus_type import Bogus
class Context:
"""Base type for objects that are valid as error message locations."""
__slots__ = ('line', 'column', 'end_line')
def __init__(self, line: int = -1, column: int = -1) -> None:
self.line = line
self.column = column
self.end_line = None # type: Optional[int]
def set_line(self,
target: Union['Context', int],
column: Optional[int] = None,
end_line: Optional[int] = None) -> None:
"""If target is a node, pull line (and column) information
into this node. If column is specified, this will override any column
information coming from a node.
"""
if isinstance(target, int):
self.line = target
else:
self.line = target.line
self.column = target.column
self.end_line = target.end_line
if column is not None:
self.column = column
if end_line is not None:
self.end_line = end_line
def get_line(self) -> int:
"""Don't use. Use x.line."""
return self.line
def get_column(self) -> int:
"""Don't use. Use x.column."""
return self.column
if TYPE_CHECKING:
# break import cycle only needed for mypy
import mypy.types
T = TypeVar('T')
JsonDict = Dict[str, Any]
# Symbol table node kinds
#
# TODO rename to use more descriptive names
LDEF = 0 # type: Final[int]
GDEF = 1 # type: Final[int]
MDEF = 2 # type: Final[int]
# Placeholder for a name imported via 'from ... import'. Second phase of
# semantic will replace this the actual imported reference. This is
# needed so that we can detect whether a name has been imported during
# XXX what?
UNBOUND_IMPORTED = 3 # type: Final[int]
# RevealExpr node kinds
REVEAL_TYPE = 0 # type: Final[int]
REVEAL_LOCALS = 1 # type: Final[int]
LITERAL_YES = 2 # type: Final
LITERAL_TYPE = 1 # type: Final
LITERAL_NO = 0 # type: Final
node_kinds = {
LDEF: 'Ldef',
GDEF: 'Gdef',
MDEF: 'Mdef',
UNBOUND_IMPORTED: 'UnboundImported',
} # type: Final
inverse_node_kinds = {_kind: _name for _name, _kind in node_kinds.items()} # type: Final
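# For example, node_kinds[GDEF] == 'Gdef' and inverse_node_kinds['Gdef'] == GDEF.
# LDEF, GDEF and MDEF tag local, module-global and class-member definitions,
# respectively, in symbol tables.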
implicit_module_attrs = {'__name__': '__builtins__.str',
'__doc__': None, # depends on Python version, see semanal.py
'__file__': '__builtins__.str',
'__package__': '__builtins__.str'} # type: Final
# These aliases exist because built-in class objects are not subscriptable.
# For example `list[int]` fails at runtime. Instead List[int] should be used.
type_aliases = {
'typing.List': 'builtins.list',
'typing.Dict': 'builtins.dict',
'typing.Set': 'builtins.set',
'typing.FrozenSet': 'builtins.frozenset',
'typing.ChainMap': 'collections.ChainMap',
'typing.Counter': 'collections.Counter',
'typing.DefaultDict': 'collections.defaultdict',
'typing.Deque': 'collections.deque',
} # type: Final
# This keeps track of the oldest supported Python version where the corresponding
# alias _target_ is available.
type_aliases_target_versions = {
'typing.List': (2, 7),
'typing.Dict': (2, 7),
'typing.Set': (2, 7),
'typing.FrozenSet': (2, 7),
'typing.ChainMap': (3, 3),
'typing.Counter': (2, 7),
'typing.DefaultDict': (2, 7),
'typing.Deque': (2, 7),
} # type: Final
reverse_builtin_aliases = {
'builtins.list': 'typing.List',
'builtins.dict': 'typing.Dict',
'builtins.set': 'typing.Set',
'builtins.frozenset': 'typing.FrozenSet',
} # type: Final
nongen_builtins = {'builtins.tuple': 'typing.Tuple',
'builtins.enumerate': ''} # type: Final
nongen_builtins.update((name, alias) for alias, name in type_aliases.items())
RUNTIME_PROTOCOL_DECOS = ('typing.runtime_checkable',
'typing_extensions.runtime',
'typing_extensions.runtime_checkable') # type: Final
class Node(Context):
"""Common base class for all non-type parse tree nodes."""
__slots__ = ()
def __str__(self) -> str:
ans = self.accept(mypy.strconv.StrConv())
if ans is None:
return repr(self)
return ans
def accept(self, visitor: NodeVisitor[T]) -> T:
raise RuntimeError('Not implemented')
@trait
class Statement(Node):
"""A statement node."""
__slots__ = ()
def accept(self, visitor: StatementVisitor[T]) -> T:
raise RuntimeError('Not implemented')
@trait
class Expression(Node):
"""An expression node."""
__slots__ = ()
def accept(self, visitor: ExpressionVisitor[T]) -> T:
raise RuntimeError('Not implemented')
class FakeExpression(Expression):
"""A dummy expression.
We need a dummy expression in one place, and can't instantiate Expression
because it is a trait and mypyc barfs.
"""
pass
# TODO:
# Lvalue = Union['NameExpr', 'MemberExpr', 'IndexExpr', 'SuperExpr', 'StarExpr'
# 'TupleExpr']; see #1783.
Lvalue = Expression
@trait
class SymbolNode(Node):
"""Nodes that can be stored in a symbol table."""
__slots__ = ()
@property
@abstractmethod
def name(self) -> str: pass
# fullname can often be None even though the type system
# disagrees. We mark this with Bogus to let mypyc know not to
# worry about it.
@property
@abstractmethod
def fullname(self) -> Bogus[str]: pass
@abstractmethod
def serialize(self) -> JsonDict: pass
@classmethod
def deserialize(cls, data: JsonDict) -> 'SymbolNode':
classname = data['.class']
method = deserialize_map.get(classname)
if method is not None:
return method(data)
raise NotImplementedError('unexpected .class {}'.format(classname))
# Items: fullname, related symbol table node, surrounding type (if any)
Definition = Tuple[str, 'SymbolTableNode', Optional['TypeInfo']]
class MypyFile(SymbolNode):
"""The abstract syntax tree of a single source file."""
# Fully qualified module name
_fullname = None # type: Bogus[str]
# Path to the file (empty string if not known)
path = ''
# Top-level definitions and statements
defs = None # type: List[Statement]
# Type alias dependencies as mapping from target to set of alias full names
alias_deps = None # type: DefaultDict[str, Set[str]]
# Is there a UTF-8 BOM at the start?
is_bom = False
names = None # type: SymbolTable
# All import nodes within the file (also ones within functions etc.)
imports = None # type: List[ImportBase]
# Lines on which to ignore certain errors when checking.
# If the value is empty, ignore all errors; otherwise, the list contains all
# error codes to ignore.
ignored_lines = None # type: Dict[int, List[str]]
# Is this file represented by a stub file (.pyi)?
is_stub = False
# Is this loaded from the cache and thus missing the actual body of the file?
is_cache_skeleton = False
# Does this represent an __init__.pyi stub with a module __getattr__
# (i.e. a partial stub package), for such packages we suppress any missing
# module errors in addition to missing attribute errors.
is_partial_stub_package = False
# Plugin-created dependencies
plugin_deps = None # type: Dict[str, Set[str]]
def __init__(self,
defs: List[Statement],
imports: List['ImportBase'],
is_bom: bool = False,
ignored_lines: Optional[Dict[int, List[str]]] = None) -> None:
super().__init__()
self.defs = defs
self.line = 1 # Dummy line number
self.imports = imports
self.is_bom = is_bom
self.alias_deps = defaultdict(set)
self.plugin_deps = {}
if ignored_lines:
self.ignored_lines = ignored_lines
else:
self.ignored_lines = {}
def local_definitions(self) -> Iterator[Definition]:
"""Return all definitions within the module (including nested).
This doesn't include imported definitions.
"""
return local_definitions(self.names, self.fullname)
@property
def name(self) -> str:
return '' if not self._fullname else self._fullname.split('.')[-1]
@property
def fullname(self) -> Bogus[str]:
return self._fullname
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_mypy_file(self)
def is_package_init_file(self) -> bool:
return len(self.path) != 0 and os.path.basename(self.path).startswith('__init__.')
def serialize(self) -> JsonDict:
return {'.class': 'MypyFile',
'_fullname': self._fullname,
'names': self.names.serialize(self._fullname),
'is_stub': self.is_stub,
'path': self.path,
'is_partial_stub_package': self.is_partial_stub_package,
}
@classmethod
def deserialize(cls, data: JsonDict) -> 'MypyFile':
assert data['.class'] == 'MypyFile', data
tree = MypyFile([], [])
tree._fullname = data['_fullname']
tree.names = SymbolTable.deserialize(data['names'])
tree.is_stub = data['is_stub']
tree.path = data['path']
tree.is_partial_stub_package = data['is_partial_stub_package']
tree.is_cache_skeleton = True
return tree
class ImportBase(Statement):
"""Base class for all import statements."""
is_unreachable = False # Set by semanal.SemanticAnalyzerPass1 if inside `if False` etc.
is_top_level = False # Ditto if outside any class or def
is_mypy_only = False # Ditto if inside `if TYPE_CHECKING` or `if MYPY`
# If an import replaces existing definitions, we construct dummy assignment
# statements that assign the imported names to the names in the current scope,
# for type checking purposes. Example:
#
# x = 1
# from m import x <-- add assignment representing "x = m.x"
assignments = None # type: List[AssignmentStmt]
def __init__(self) -> None:
super().__init__()
self.assignments = []
class Import(ImportBase):
"""import m [as n]"""
ids = None # type: List[Tuple[str, Optional[str]]] # (module id, as id)
def __init__(self, ids: List[Tuple[str, Optional[str]]]) -> None:
super().__init__()
self.ids = ids
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import(self)
class ImportFrom(ImportBase):
"""from m import x [as y], ..."""
id = None # type: str
relative = None # type: int
names = None # type: List[Tuple[str, Optional[str]]] # Tuples (name, as name)
def __init__(self, id: str, relative: int, names: List[Tuple[str, Optional[str]]]) -> None:
super().__init__()
self.id = id
self.names = names
self.relative = relative
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import_from(self)
class ImportAll(ImportBase):
"""from m import *"""
id = None # type: str
relative = None # type: int
# NOTE: Only filled and used by old semantic analyzer.
imported_names = None # type: List[str]
def __init__(self, id: str, relative: int) -> None:
super().__init__()
self.id = id
self.relative = relative
self.imported_names = []
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import_all(self)
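# For illustration, import statements map onto these nodes roughly as follows:
#
#     import os.path as p     ->  Import(ids=[('os.path', 'p')])
#     from a.b import c as d  ->  ImportFrom(id='a.b', relative=0, names=[('c', 'd')])
#     from ..pkg import *     ->  ImportAll(id='pkg', relative=2)
#
# 'relative' counts the leading dots of a relative import (0 for absolute imports).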
class ImportedName(SymbolNode):
"""Indirect reference to a fullname stored in symbol table.
This node is not present in the original program as such. This is
just a temporary artifact in binding imported names. After semantic
analysis pass 2, these references should be replaced with direct
reference to a real AST node.
Note that this is neither a Statement nor an Expression so this
can't be visited.
"""
def __init__(self, target_fullname: str) -> None:
super().__init__()
self.target_fullname = target_fullname
@property
def name(self) -> str:
return self.target_fullname.split('.')[-1]
@property
def fullname(self) -> str:
return self.target_fullname
def serialize(self) -> JsonDict:
assert False, "ImportedName leaked from semantic analysis"
@classmethod
def deserialize(cls, data: JsonDict) -> 'ImportedName':
assert False, "ImportedName should never be serialized"
def __str__(self) -> str:
return 'ImportedName(%s)' % self.target_fullname
FUNCBASE_FLAGS = [
'is_property', 'is_class', 'is_static', 'is_final'
] # type: Final
class FuncBase(Node):
"""Abstract base class for function-like nodes.
N.B: Although this has SymbolNode subclasses (FuncDef,
OverloadedFuncDef), avoid calling isinstance(..., FuncBase) on
something that is typed as SymbolNode. This is to work around
mypy bug #3603, in which mypy doesn't understand multiple
inheritance very well, and will assume that a SymbolNode
cannot be a FuncBase.
Instead, test against SYMBOL_FUNCBASE_TYPES, which enumerates
SymbolNode subclasses that are also FuncBase subclasses.
"""
__slots__ = ('type',
'unanalyzed_type',
'info',
'is_property',
'is_class', # Uses "@classmethod" (explicit or implicit)
'is_static', # Uses "@staticmethod"
'is_final', # Uses "@final"
'_fullname',
)
def __init__(self) -> None:
super().__init__()
# Type signature. This is usually CallableType or Overloaded, but it can be
# something else for decorated functions.
self.type = None # type: Optional[mypy.types.ProperType]
# Original, not semantically analyzed type (used for reprocessing)
self.unanalyzed_type = None # type: Optional[mypy.types.ProperType]
# If method, reference to TypeInfo
# TODO: Type should be Optional[TypeInfo]
self.info = FUNC_NO_INFO
self.is_property = False
self.is_class = False
self.is_static = False
self.is_final = False
# Name with module prefix
# TODO: Type should be Optional[str]
self._fullname = cast(Bogus[str], None)
@property
@abstractmethod
def name(self) -> str: pass
@property
def fullname(self) -> Bogus[str]:
return self._fullname
OverloadPart = Union['FuncDef', 'Decorator']
class OverloadedFuncDef(FuncBase, SymbolNode, Statement):
"""A logical node representing all the variants of a multi-declaration function.
A multi-declaration function is often an @overload, but can also be a
    @property with a setter and/or a deleter.
This node has no explicit representation in the source program.
Overloaded variants must be consecutive in the source file.
"""
items = None # type: List[OverloadPart]
unanalyzed_items = None # type: List[OverloadPart]
impl = None # type: Optional[OverloadPart]
def __init__(self, items: List['OverloadPart']) -> None:
super().__init__()
self.items = items
self.unanalyzed_items = items.copy()
self.impl = None
if len(items) > 0:
self.set_line(items[0].line, items[0].column)
self.is_final = False
@property
def name(self) -> str:
if self.items:
return self.items[0].name
else:
            # This may happen for a malformed overload
assert self.impl is not None
return self.impl.name
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_overloaded_func_def(self)
def serialize(self) -> JsonDict:
return {'.class': 'OverloadedFuncDef',
'items': [i.serialize() for i in self.items],
'type': None if self.type is None else self.type.serialize(),
'fullname': self._fullname,
'impl': None if self.impl is None else self.impl.serialize(),
'flags': get_flags(self, FUNCBASE_FLAGS),
}
@classmethod
def deserialize(cls, data: JsonDict) -> 'OverloadedFuncDef':
assert data['.class'] == 'OverloadedFuncDef'
res = OverloadedFuncDef([
cast(OverloadPart, SymbolNode.deserialize(d))
for d in data['items']])
if data.get('impl') is not None:
res.impl = cast(OverloadPart, SymbolNode.deserialize(data['impl']))
# set line for empty overload items, as not set in __init__
if len(res.items) > 0:
res.set_line(res.impl.line)
if data.get('type') is not None:
typ = mypy.types.deserialize_type(data['type'])
assert isinstance(typ, mypy.types.ProperType)
res.type = typ
res._fullname = data['fullname']
set_flags(res, data['flags'])
# NOTE: res.info will be set in the fixup phase.
return res
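# For illustration, a definition like
#
#     @overload
#     def f(x: int) -> int: ...
#     @overload
#     def f(x: str) -> str: ...
#     def f(x): ...
#
# becomes a single OverloadedFuncDef whose 'items' hold the two decorated
# overload variants and whose 'impl' holds the final implementation.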
class Argument(Node):
"""A single argument in a FuncItem."""
__slots__ = ('variable', 'type_annotation', 'initializer', 'kind')
def __init__(self,
variable: 'Var',
type_annotation: 'Optional[mypy.types.Type]',
initializer: Optional[Expression],
kind: int) -> None:
super().__init__()
self.variable = variable
self.type_annotation = type_annotation
self.initializer = initializer
self.kind = kind # must be an ARG_* constant
def set_line(self,
target: Union[Context, int],
column: Optional[int] = None,
end_line: Optional[int] = None) -> None:
super().set_line(target, column, end_line)
if self.initializer and self.initializer.line < 0:
self.initializer.set_line(self.line, self.column, self.end_line)
self.variable.set_line(self.line, self.column, self.end_line)
FUNCITEM_FLAGS = FUNCBASE_FLAGS + [
'is_overload', 'is_generator', 'is_coroutine', 'is_async_generator',
'is_awaitable_coroutine',
] # type: Final
class FuncItem(FuncBase):
"""Base class for nodes usable as overloaded function items."""
    __slots__ = ('arguments',  # Note that this can be None if deserialized (type is a lie!)
'arg_names', # Names of arguments
'arg_kinds', # Kinds of arguments
'min_args', # Minimum number of arguments
'max_pos', # Maximum number of positional arguments, -1 if no explicit
# limit (*args not included)
'body', # Body of the function
'is_overload', # Is this an overload variant of function with more than
# one overload variant?
'is_generator', # Contains a yield statement?
'is_coroutine', # Defined using 'async def' syntax?
'is_async_generator', # Is an async def generator?
'is_awaitable_coroutine', # Decorated with '@{typing,asyncio}.coroutine'?
'expanded', # Variants of function with type variables with values expanded
)
def __init__(self,
arguments: List[Argument],
body: 'Block',
typ: 'Optional[mypy.types.FunctionLike]' = None) -> None:
super().__init__()
self.arguments = arguments
self.arg_names = [arg.variable.name for arg in self.arguments]
self.arg_kinds = [arg.kind for arg in self.arguments] # type: List[int]
self.max_pos = self.arg_kinds.count(ARG_POS) + self.arg_kinds.count(ARG_OPT)
self.body = body
self.type = typ
self.unanalyzed_type = typ
self.is_overload = False
self.is_generator = False
self.is_coroutine = False
self.is_async_generator = False
self.is_awaitable_coroutine = False
self.expanded = [] # type: List[FuncItem]
self.min_args = 0
for i in range(len(self.arguments)):
if self.arguments[i] is None and i < self.max_fixed_argc():
self.min_args = i + 1
def max_fixed_argc(self) -> int:
return self.max_pos
def set_line(self,
target: Union[Context, int],
column: Optional[int] = None,
end_line: Optional[int] = None) -> None:
super().set_line(target, column, end_line)
for arg in self.arguments:
arg.set_line(self.line, self.column, self.end_line)
def is_dynamic(self) -> bool:
return self.type is None
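# For illustration, for a function such as
#
#     def f(a: int, b: str = 'x') -> None: ...
#
# the FuncItem has arg_names == ['a', 'b'], arg_kinds == [ARG_POS, ARG_OPT],
# max_pos == 2, and 'type' holds the callable type of the signature once it
# has been analyzed (None for an unannotated def).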
FUNCDEF_FLAGS = FUNCITEM_FLAGS + [
'is_decorated', 'is_conditional', 'is_abstract',
] # type: Final
class FuncDef(FuncItem, SymbolNode, Statement):
"""Function definition.
This is a non-lambda function defined using 'def'.
"""
__slots__ = ('_name',
'is_decorated',
'is_conditional',
'is_abstract',
'original_def',
)
def __init__(self,
name: str, # Function name
arguments: List[Argument],
body: 'Block',
typ: 'Optional[mypy.types.FunctionLike]' = None) -> None:
super().__init__(arguments, body, typ)
self._name = name
self.is_decorated = False
self.is_conditional = False # Defined conditionally (within block)?
self.is_abstract = False
self.is_final = False
# Original conditional definition
self.original_def = None # type: Union[None, FuncDef, Var, Decorator]
@property
def name(self) -> str:
return self._name
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_func_def(self)
def serialize(self) -> JsonDict:
        # We're deliberately omitting arguments and storing only arg_names and
# arg_kinds for space-saving reasons (arguments is not used in later
# stages of mypy).
# TODO: After a FuncDef is deserialized, the only time we use `arg_names`
# and `arg_kinds` is when `type` is None and we need to infer a type. Can
# we store the inferred type ahead of time?
return {'.class': 'FuncDef',
'name': self._name,
'fullname': self._fullname,
'arg_names': self.arg_names,
'arg_kinds': self.arg_kinds,
'type': None if self.type is None else self.type.serialize(),
'flags': get_flags(self, FUNCDEF_FLAGS),
# TODO: Do we need expanded, original_def?
}
@classmethod
def deserialize(cls, data: JsonDict) -> 'FuncDef':
assert data['.class'] == 'FuncDef'
body = Block([])
ret = FuncDef(data['name'],
[],
body,
(None if data['type'] is None
else cast(mypy.types.FunctionLike,
mypy.types.deserialize_type(data['type']))))
ret._fullname = data['fullname']
set_flags(ret, data['flags'])
# NOTE: ret.info is set in the fixup phase.
ret.arg_names = data['arg_names']
ret.arg_kinds = data['arg_kinds']
# Leave these uninitialized so that future uses will trigger an error
del ret.arguments
del ret.max_pos
del ret.min_args
return ret
# All types that are both SymbolNodes and FuncBases. See the FuncBase
# docstring for the rationale.
SYMBOL_FUNCBASE_TYPES = (OverloadedFuncDef, FuncDef)
class Decorator(SymbolNode, Statement):
"""A decorated function.
A single Decorator object can include any number of function decorators.
"""
func = None # type: FuncDef # Decorated function
decorators = None # type: List[Expression] # Decorators (may be empty)
# Some decorators are removed by semanal, keep the original here.
original_decorators = None # type: List[Expression]
# TODO: This is mostly used for the type; consider replacing with a 'type' attribute
var = None # type: Var # Represents the decorated function obj
is_overload = False
def __init__(self, func: FuncDef, decorators: List[Expression],
var: 'Var') -> None:
super().__init__()
self.func = func
self.decorators = decorators
self.original_decorators = decorators.copy()
self.var = var
self.is_overload = False
@property
def name(self) -> str:
return self.func.name
@property
def fullname(self) -> Bogus[str]:
return self.func.fullname
@property
def is_final(self) -> bool:
return self.func.is_final
@property
def info(self) -> 'TypeInfo':
return self.func.info
@property
def type(self) -> 'Optional[mypy.types.Type]':
return self.var.type
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_decorator(self)
def serialize(self) -> JsonDict:
return {'.class': 'Decorator',
'func': self.func.serialize(),
'var': self.var.serialize(),
'is_overload': self.is_overload,
}
@classmethod
def deserialize(cls, data: JsonDict) -> 'Decorator':
assert data['.class'] == 'Decorator'
dec = Decorator(FuncDef.deserialize(data['func']),
[],
Var.deserialize(data['var']))
dec.is_overload = data['is_overload']
return dec
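# For illustration, a decorated function such as
#
#     @functools.lru_cache()
#     def g() -> int: ...
#
# is represented as Decorator(func=<FuncDef g>, decorators=[<lru_cache() call>],
# var=<Var g>), where 'var' carries the type of 'g' as seen by its callers.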
VAR_FLAGS = [
'is_self', 'is_initialized_in_class', 'is_staticmethod',
'is_classmethod', 'is_property', 'is_settable_property', 'is_suppressed_import',
'is_classvar', 'is_abstract_var', 'is_final', 'final_unset_in_class', 'final_set_in_init',
'explicit_self_type', 'is_ready',
] # type: Final
class Var(SymbolNode):
"""A variable.
    It can refer to a global/local variable or a data attribute.
"""
__slots__ = ('_name',
'_fullname',
'info',
'type',
'final_value',
'is_self',
'is_ready',
'is_inferred',
'is_initialized_in_class',
'is_staticmethod',
'is_classmethod',
'is_property',
'is_settable_property',
'is_classvar',
'is_abstract_var',
'is_final',
'final_unset_in_class',
'final_set_in_init',
'is_suppressed_import',
'explicit_self_type',
'from_module_getattr',
)
def __init__(self, name: str, type: 'Optional[mypy.types.Type]' = None) -> None:
super().__init__()
self._name = name # Name without module prefix
# TODO: Should be Optional[str]
self._fullname = cast('Bogus[str]', None) # Name with module prefix
# TODO: Should be Optional[TypeInfo]
self.info = VAR_NO_INFO
self.type = type # type: Optional[mypy.types.Type] # Declared or inferred type, or None
# Is this the first argument to an ordinary method (usually "self")?
self.is_self = False
self.is_ready = True # If inferred, is the inferred type available?
self.is_inferred = (self.type is None)
# Is this initialized explicitly to a non-None value in class body?
self.is_initialized_in_class = False
self.is_staticmethod = False
self.is_classmethod = False
self.is_property = False
self.is_settable_property = False
self.is_classvar = False
self.is_abstract_var = False
# Set to true when this variable refers to a module we were unable to
# parse for some reason (eg a silenced module)
self.is_suppressed_import = False
# Was this "variable" (rather a constant) defined as Final[...]?
self.is_final = False
# If constant value is a simple literal,
# store the literal value (unboxed) for the benefit of
# tools like mypyc.
self.final_value = None # type: Optional[Union[int, float, bool, str]]
# Where the value was set (only for class attributes)
self.final_unset_in_class = False
self.final_set_in_init = False
# This is True for a variable that was declared on self with an explicit type:
# class C:
# def __init__(self) -> None:
# self.x: int
# This case is important because this defines a new Var, even if there is one
# present in a superclass (without explicit type this doesn't create a new Var).
# See SemanticAnalyzer.analyze_member_lvalue() for details.
self.explicit_self_type = False
# If True, this is an implicit Var created due to module-level __getattr__.
self.from_module_getattr = False
@property
def name(self) -> str:
return self._name
@property
def fullname(self) -> Bogus[str]:
return self._fullname
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_var(self)
def serialize(self) -> JsonDict:
# TODO: Leave default values out?
# NOTE: Sometimes self.is_ready is False here, but we don't care.
data = {'.class': 'Var',
'name': self._name,
'fullname': self._fullname,
'type': None if self.type is None else self.type.serialize(),
'flags': get_flags(self, VAR_FLAGS),
} # type: JsonDict
if self.final_value is not None:
data['final_value'] = self.final_value
return data
@classmethod
def deserialize(cls, data: JsonDict) -> 'Var':
assert data['.class'] == 'Var'
name = data['name']
type = None if data['type'] is None else mypy.types.deserialize_type(data['type'])
v = Var(name, type)
v.is_ready = False # Override True default set in __init__
v._fullname = data['fullname']
set_flags(v, data['flags'])
v.final_value = data.get('final_value')
return v
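# For illustration, a module-level assignment 'x = 1' creates a Var('x') with
# an inferred type (is_inferred=True), while a class-body annotation such as
# 'x: ClassVar[int]' produces a Var with an explicit type and is_classvar=True.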
class ClassDef(Statement):
"""Class definition"""
name = None # type: str # Name of the class without module prefix
fullname = None # type: Bogus[str] # Fully qualified name of the class
defs = None # type: Block
type_vars = None # type: List[mypy.types.TypeVarDef]
# Base class expressions (not semantically analyzed -- can be arbitrary expressions)
base_type_exprs = None # type: List[Expression]
# Special base classes like Generic[...] get moved here during semantic analysis
removed_base_type_exprs = None # type: List[Expression]
info = None # type: TypeInfo # Related TypeInfo
metaclass = None # type: Optional[Expression]
decorators = None # type: List[Expression]
keywords = None # type: OrderedDict[str, Expression]
analyzed = None # type: Optional[Expression]
has_incompatible_baseclass = False
def __init__(self,
name: str,
defs: 'Block',
type_vars: Optional[List['mypy.types.TypeVarDef']] = None,
base_type_exprs: Optional[List[Expression]] = None,
metaclass: Optional[Expression] = None,
keywords: Optional[List[Tuple[str, Expression]]] = None) -> None:
super().__init__()
self.name = name
self.defs = defs
self.type_vars = type_vars or []
self.base_type_exprs = base_type_exprs or []
self.removed_base_type_exprs = []
self.info = CLASSDEF_NO_INFO
self.metaclass = metaclass
self.decorators = []
self.keywords = OrderedDict(keywords or [])
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_class_def(self)
def is_generic(self) -> bool:
return self.info.is_generic()
def serialize(self) -> JsonDict:
# Not serialized: defs, base_type_exprs, metaclass, decorators,
# analyzed (for named tuples etc.)
return {'.class': 'ClassDef',
'name': self.name,
'fullname': self.fullname,
'type_vars': [v.serialize() for v in self.type_vars],
}
@classmethod
    def deserialize(cls, data: JsonDict) -> 'ClassDef':
assert data['.class'] == 'ClassDef'
res = ClassDef(data['name'],
Block([]),
[mypy.types.TypeVarDef.deserialize(v) for v in data['type_vars']],
)
res.fullname = data['fullname']
return res
class GlobalDecl(Statement):
"""Declaration global x, y, ..."""
names = None # type: List[str]
def __init__(self, names: List[str]) -> None:
super().__init__()
self.names = names
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_global_decl(self)
class NonlocalDecl(Statement):
"""Declaration nonlocal x, y, ..."""
names = None # type: List[str]
def __init__(self, names: List[str]) -> None:
super().__init__()
self.names = names
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_nonlocal_decl(self)
class Block(Statement):
__slots__ = ('body', 'is_unreachable')
def __init__(self, body: List[Statement]) -> None:
super().__init__()
self.body = body
# True if we can determine that this block is not executed during semantic
# analysis. For example, this applies to blocks that are protected by
# something like "if PY3:" when using Python 2. However, some code is
# only considered unreachable during type checking and this is not true
# in those cases.
self.is_unreachable = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_block(self)
# Statements
class ExpressionStmt(Statement):
"""An expression as a statement, such as print(s)."""
expr = None # type: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_expression_stmt(self)
class AssignmentStmt(Statement):
"""Assignment statement.
The same node class is used for single assignment, multiple assignment
(e.g. x, y = z) and chained assignment (e.g. x = y = z), assignments
that define new names, and assignments with explicit types ("# type: t"
or "x: t [= ...]").
An lvalue can be NameExpr, TupleExpr, ListExpr, MemberExpr, or IndexExpr.
"""
lvalues = None # type: List[Lvalue]
# This is a TempNode if and only if no rvalue (x: t).
rvalue = None # type: Expression
# Declared type in a comment, may be None.
type = None # type: Optional[mypy.types.Type]
# Original, not semantically analyzed type in annotation (used for reprocessing)
unanalyzed_type = None # type: Optional[mypy.types.Type]
# This indicates usage of PEP 526 type annotation syntax in assignment.
new_syntax = False # type: bool
# Does this assignment define a type alias?
is_alias_def = False
# Is this a final definition?
# Final attributes can't be re-assigned once set, and can't be overridden
# in a subclass. This flag is not set if an attempted declaration was found to
# be invalid during semantic analysis. It is still set to `True` if
# a final declaration overrides another final declaration (this is checked
# during type checking when MROs are known).
is_final_def = False
def __init__(self, lvalues: List[Lvalue], rvalue: Expression,
type: 'Optional[mypy.types.Type]' = None, new_syntax: bool = False) -> None:
super().__init__()
self.lvalues = lvalues
self.rvalue = rvalue
self.type = type
self.unanalyzed_type = type
self.new_syntax = new_syntax
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_assignment_stmt(self)
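# For illustration:
#
#     x = y = 0    ->  lvalues == [NameExpr x, NameExpr y], rvalue == IntExpr 0
#     a, b = t     ->  lvalues == [TupleExpr [a, b]], rvalue == NameExpr t
#     n: int = 1   ->  lvalues == [NameExpr n], type holds the declared 'int', new_syntax == True
#
# and for a bare annotation 'n: int' the rvalue is a TempNode (see above).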
class OperatorAssignmentStmt(Statement):
"""Operator assignment statement such as x += 1"""
op = ''
lvalue = None # type: Lvalue
rvalue = None # type: Expression
def __init__(self, op: str, lvalue: Lvalue, rvalue: Expression) -> None:
super().__init__()
self.op = op
self.lvalue = lvalue
self.rvalue = rvalue
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_operator_assignment_stmt(self)
class WhileStmt(Statement):
expr = None # type: Expression
body = None # type: Block
else_body = None # type: Optional[Block]
def __init__(self, expr: Expression, body: Block, else_body: Optional[Block]) -> None:
super().__init__()
self.expr = expr
self.body = body
self.else_body = else_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_while_stmt(self)
class ForStmt(Statement):
# Index variables
index = None # type: Lvalue
# Type given by type comments for index, can be None
index_type = None # type: Optional[mypy.types.Type]
# Original, not semantically analyzed type in annotation (used for reprocessing)
unanalyzed_index_type = None # type: Optional[mypy.types.Type]
# Inferred iterable item type
inferred_item_type = None # type: Optional[mypy.types.Type]
# Inferred iterator type
inferred_iterator_type = None # type: Optional[mypy.types.Type]
# Expression to iterate
expr = None # type: Expression
body = None # type: Block
else_body = None # type: Optional[Block]
is_async = False # True if `async for ...` (PEP 492, Python 3.5)
def __init__(self,
index: Lvalue,
expr: Expression,
body: Block,
else_body: Optional[Block],
index_type: 'Optional[mypy.types.Type]' = None) -> None:
super().__init__()
self.index = index
self.index_type = index_type
self.unanalyzed_index_type = index_type
self.expr = expr
self.body = body
self.else_body = else_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_for_stmt(self)
class ReturnStmt(Statement):
expr = None # type: Optional[Expression]
def __init__(self, expr: Optional[Expression]) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_return_stmt(self)
class AssertStmt(Statement):
expr = None # type: Expression
msg = None # type: Optional[Expression]
def __init__(self, expr: Expression, msg: Optional[Expression] = None) -> None:
super().__init__()
self.expr = expr
self.msg = msg
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_assert_stmt(self)
class DelStmt(Statement):
expr = None # type: Lvalue
def __init__(self, expr: Lvalue) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_del_stmt(self)
class BreakStmt(Statement):
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_break_stmt(self)
class ContinueStmt(Statement):
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_continue_stmt(self)
class PassStmt(Statement):
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_pass_stmt(self)
class IfStmt(Statement):
expr = None # type: List[Expression]
body = None # type: List[Block]
else_body = None # type: Optional[Block]
def __init__(self, expr: List[Expression], body: List[Block],
else_body: Optional[Block]) -> None:
super().__init__()
self.expr = expr
self.body = body
self.else_body = else_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_if_stmt(self)
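# For illustration, 'expr' and 'body' are parallel lists: expr[i] is the
# condition guarding body[i], and else_body holds the trailing else block,
# or None when there is no else clause.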
class RaiseStmt(Statement):
# Plain 'raise' is a valid statement.
expr = None # type: Optional[Expression]
from_expr = None # type: Optional[Expression]
def __init__(self, expr: Optional[Expression], from_expr: Optional[Expression]) -> None:
super().__init__()
self.expr = expr
self.from_expr = from_expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_raise_stmt(self)
class TryStmt(Statement):
body = None # type: Block # Try body
# Plain 'except:' also possible
types = None # type: List[Optional[Expression]] # Except type expressions
vars = None # type: List[Optional[NameExpr]] # Except variable names
handlers = None # type: List[Block] # Except bodies
else_body = None # type: Optional[Block]
finally_body = None # type: Optional[Block]
def __init__(self, body: Block, vars: List['Optional[NameExpr]'],
types: List[Optional[Expression]],
handlers: List[Block], else_body: Optional[Block],
finally_body: Optional[Block]) -> None:
super().__init__()
self.body = body
self.vars = vars
self.types = types
self.handlers = handlers
self.else_body = else_body
self.finally_body = finally_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_try_stmt(self)
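# For illustration, in
#
#     try: ...
#     except KeyError as e: ...
#     except Exception: ...
#
# 'types', 'vars' and 'handlers' are parallel lists with one entry per except
# clause: types == [KeyError, Exception], vars == [NameExpr e, None], and
# handlers holds the two handler bodies. A bare 'except:' contributes None
# to 'types' as well.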
class WithStmt(Statement):
expr = None # type: List[Expression]
target = None # type: List[Optional[Lvalue]]
# Type given by type comments for target, can be None
unanalyzed_type = None # type: Optional[mypy.types.Type]
# Semantically analyzed types from type comment (TypeList type expanded)
analyzed_types = None # type: List[mypy.types.Type]
body = None # type: Block
is_async = False # True if `async with ...` (PEP 492, Python 3.5)
def __init__(self, expr: List[Expression], target: List[Optional[Lvalue]],
body: Block, target_type: 'Optional[mypy.types.Type]' = None) -> None:
super().__init__()
self.expr = expr
self.target = target
self.unanalyzed_type = target_type
self.analyzed_types = []
self.body = body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_with_stmt(self)
class PrintStmt(Statement):
"""Python 2 print statement"""
args = None # type: List[Expression]
newline = False
# The file-like target object (given using >>).
target = None # type: Optional[Expression]
def __init__(self,
args: List[Expression],
newline: bool,
target: Optional[Expression] = None) -> None:
super().__init__()
self.args = args
self.newline = newline
self.target = target
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_print_stmt(self)
class ExecStmt(Statement):
"""Python 2 exec statement"""
expr = None # type: Expression
globals = None # type: Optional[Expression]
locals = None # type: Optional[Expression]
def __init__(self, expr: Expression,
globals: Optional[Expression],
locals: Optional[Expression]) -> None:
super().__init__()
self.expr = expr
self.globals = globals
self.locals = locals
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_exec_stmt(self)
# Expressions
class IntExpr(Expression):
"""Integer literal"""
value = 0
def __init__(self, value: int) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_int_expr(self)
# How mypy uses StrExpr, BytesExpr, and UnicodeExpr:
# In Python 2 mode:
# b'x', 'x' -> StrExpr
# u'x' -> UnicodeExpr
# BytesExpr is unused
#
# In Python 3 mode:
# b'x' -> BytesExpr
# 'x', u'x' -> StrExpr
# UnicodeExpr is unused
class StrExpr(Expression):
"""String literal"""
value = ''
# Keeps track of whether this string originated from Python 2 source code vs
# Python 3 source code. We need to keep track of this information so we can
# correctly handle types that have "nested strings". For example, consider this
# type alias, where we have a forward reference to a literal type:
#
# Alias = List["Literal['foo']"]
#
# When parsing this, we need to know whether the outer string and alias came from
# Python 2 code vs Python 3 code so we can determine whether the inner `Literal['foo']`
# is meant to be `Literal[u'foo']` or `Literal[b'foo']`.
#
# This field keeps track of that information.
from_python_3 = True
def __init__(self, value: str, from_python_3: bool = False) -> None:
super().__init__()
self.value = value
self.from_python_3 = from_python_3
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_str_expr(self)
class BytesExpr(Expression):
"""Bytes literal"""
# Note: we deliberately do NOT use bytes here because it ends up
# unnecessarily complicating a lot of the result logic. For example,
# we'd have to worry about converting the bytes into a format we can
# easily serialize/deserialize to and from JSON, would have to worry
# about turning the bytes into a human-readable representation in
# error messages...
#
# It's more convenient to just store the human-readable representation
# from the very start.
value = ''
def __init__(self, value: str) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_bytes_expr(self)
class UnicodeExpr(Expression):
"""Unicode literal (Python 2.x)"""
value = ''
def __init__(self, value: str) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_unicode_expr(self)
class FloatExpr(Expression):
"""Float literal"""
value = 0.0
def __init__(self, value: float) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_float_expr(self)
class ComplexExpr(Expression):
"""Complex literal"""
def __init__(self, value: complex) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_complex_expr(self)
class EllipsisExpr(Expression):
"""Ellipsis (...)"""
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_ellipsis(self)
class StarExpr(Expression):
"""Star expression"""
expr = None # type: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
# Whether this starred expression is used in a tuple/list and as lvalue
self.valid = False
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_star_expr(self)
class RefExpr(Expression):
"""Abstract base class for name-like constructs"""
__slots__ = ('kind', 'node', 'fullname', 'is_new_def', 'is_inferred_def', 'is_alias_rvalue')
def __init__(self) -> None:
super().__init__()
# LDEF/GDEF/MDEF/... (None if not available)
self.kind = None # type: Optional[int]
# Var, FuncDef or TypeInfo that describes this
self.node = None # type: Optional[SymbolNode]
# Fully qualified name (or name if not global)
self.fullname = None # type: Optional[str]
# Does this define a new name?
self.is_new_def = False
# Does this define a new name with inferred type?
#
# For members, after semantic analysis, this does not take base
# classes into consideration at all; the type checker deals with these.
self.is_inferred_def = False
        # Does this expression appear as an rvalue of a valid type alias definition?
self.is_alias_rvalue = False
class NameExpr(RefExpr):
"""Name expression
This refers to a local name, global name or a module.
"""
__slots__ = ('name', 'is_special_form')
def __init__(self, name: str) -> None:
super().__init__()
self.name = name # Name referred to (may be qualified)
        # Is this the l.h.s. of a special form assignment like a typed dict or type variable?
self.is_special_form = False
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_name_expr(self)
def serialize(self) -> JsonDict:
assert False, "Serializing NameExpr: %s" % (self,)
class MemberExpr(RefExpr):
"""Member access expression x.y"""
__slots__ = ('expr', 'name', 'def_var')
def __init__(self, expr: Expression, name: str) -> None:
super().__init__()
self.expr = expr
self.name = name
# The variable node related to a definition through 'self.x = <initializer>'.
# The nodes of other kinds of member expressions are resolved during type checking.
self.def_var = None # type: Optional[Var]
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_member_expr(self)
# Kinds of arguments
# Positional argument
ARG_POS = 0 # type: Final[int]
# Positional, optional argument (functions only, not calls)
ARG_OPT = 1 # type: Final[int]
# *arg argument
ARG_STAR = 2 # type: Final[int]
# Keyword argument x=y in call, or keyword-only function arg
ARG_NAMED = 3 # type: Final[int]
# **arg argument
ARG_STAR2 = 4 # type: Final[int]
# In an argument list, keyword-only and also optional
ARG_NAMED_OPT = 5 # type: Final[int]
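# For illustration, the parameters of
#
#     def f(a, b=0, *args, c, d=1, **kw): ...
#
# have kinds [ARG_POS, ARG_OPT, ARG_STAR, ARG_NAMED, ARG_NAMED_OPT, ARG_STAR2].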
class CallExpr(Expression):
"""Call expression.
This can also represent several special forms that are syntactically calls
such as cast(...) and None # type: ....
"""
__slots__ = ('callee', 'args', 'arg_kinds', 'arg_names', 'analyzed')
def __init__(self,
callee: Expression,
args: List[Expression],
arg_kinds: List[int],
arg_names: List[Optional[str]],
analyzed: Optional[Expression] = None) -> None:
super().__init__()
if not arg_names:
arg_names = [None] * len(args)
self.callee = callee
self.args = args
self.arg_kinds = arg_kinds # ARG_ constants
# Each name can be None if not a keyword argument.
self.arg_names = arg_names # type: List[Optional[str]]
# If not None, the node that represents the meaning of the CallExpr. For
# cast(...) this is a CastExpr.
self.analyzed = analyzed
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_call_expr(self)
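# For illustration, the call f(1, x=2) is represented roughly as
#
#     CallExpr(callee=NameExpr('f'),
#              args=[IntExpr(1), IntExpr(2)],
#              arg_kinds=[ARG_POS, ARG_NAMED],
#              arg_names=[None, 'x'])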
class YieldFromExpr(Expression):
expr = None # type: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_yield_from_expr(self)
class YieldExpr(Expression):
expr = None # type: Optional[Expression]
def __init__(self, expr: Optional[Expression]) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_yield_expr(self)
class IndexExpr(Expression):
"""Index expression x[y].
Also wraps type application such as List[int] as a special form.
"""
base = None # type: Expression
index = None # type: Expression
# Inferred __getitem__ method type
method_type = None # type: Optional[mypy.types.Type]
# If not None, this is actually semantically a type application
# Class[type, ...] or a type alias initializer.
analyzed = None # type: Union[TypeApplication, TypeAliasExpr, None]
def __init__(self, base: Expression, index: Expression) -> None:
super().__init__()
self.base = base
self.index = index
self.analyzed = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_index_expr(self)
class UnaryExpr(Expression):
"""Unary operation"""
op = ''
expr = None # type: Expression
# Inferred operator method type
method_type = None # type: Optional[mypy.types.Type]
def __init__(self, op: str, expr: Expression) -> None:
super().__init__()
self.op = op
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_unary_expr(self)
class AssignmentExpr(Expression):
"""Assignment expressions in Python 3.8+, like "a := 2"."""
def __init__(self, target: Expression, value: Expression) -> None:
super().__init__()
self.target = target
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_assignment_expr(self)
# Map from binary operator id to related method name (in Python 3).
op_methods = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__truediv__',
'%': '__mod__',
'divmod': '__divmod__',
'//': '__floordiv__',
'**': '__pow__',
'@': '__matmul__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
'<<': '__lshift__',
'>>': '__rshift__',
'==': '__eq__',
'!=': '__ne__',
'<': '__lt__',
'>=': '__ge__',
'>': '__gt__',
'<=': '__le__',
'in': '__contains__',
} # type: Final[Dict[str, str]]
op_methods_to_symbols = {v: k for (k, v) in op_methods.items()} # type: Final
op_methods_to_symbols['__div__'] = '/'
comparison_fallback_method = '__cmp__' # type: Final
ops_falling_back_to_cmp = {'__ne__', '__eq__',
'__lt__', '__le__',
'__gt__', '__ge__'} # type: Final
ops_with_inplace_method = {
'+', '-', '*', '/', '%', '//', '**', '@', '&', '|', '^', '<<', '>>'} # type: Final
inplace_operator_methods = set(
'__i' + op_methods[op][2:] for op in ops_with_inplace_method) # type: Final
reverse_op_methods = {
'__add__': '__radd__',
'__sub__': '__rsub__',
'__mul__': '__rmul__',
'__truediv__': '__rtruediv__',
'__mod__': '__rmod__',
'__divmod__': '__rdivmod__',
'__floordiv__': '__rfloordiv__',
'__pow__': '__rpow__',
'__matmul__': '__rmatmul__',
'__and__': '__rand__',
'__or__': '__ror__',
'__xor__': '__rxor__',
'__lshift__': '__rlshift__',
'__rshift__': '__rrshift__',
'__eq__': '__eq__',
'__ne__': '__ne__',
'__lt__': '__gt__',
'__ge__': '__le__',
'__gt__': '__lt__',
'__le__': '__ge__',
} # type: Final
# Suppose we have some class A. When we do A() + A(), Python will only check
# the output of A().__add__(A()) and skip calling the __radd__ method entirely.
# This shortcut is used only for the following methods:
op_methods_that_shortcut = {
'__add__',
'__sub__',
'__mul__',
'__div__',
'__truediv__',
'__mod__',
'__divmod__',
'__floordiv__',
'__pow__',
'__matmul__',
'__and__',
'__or__',
'__xor__',
'__lshift__',
'__rshift__',
} # type: Final
normal_from_reverse_op = dict((m, n) for n, m in reverse_op_methods.items()) # type: Final
reverse_op_method_set = set(reverse_op_methods.values()) # type: Final
unary_op_methods = {
'-': '__neg__',
'+': '__pos__',
'~': '__invert__',
} # type: Final
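# For illustration, the tables above relate as follows for '*':
#
#     op_methods['*'] == '__mul__'
#     reverse_op_methods['__mul__'] == '__rmul__'
#     '__imul__' in inplace_operator_methods
#     unary_op_methods['-'] == '__neg__'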
class OpExpr(Expression):
"""Binary operation (other than . or [] or comparison operators,
which have specific nodes)."""
op = ''
left = None # type: Expression
right = None # type: Expression
# Inferred type for the operator method type (when relevant).
method_type = None # type: Optional[mypy.types.Type]
# Is the right side going to be evaluated every time?
right_always = False
# Is the right side unreachable?
right_unreachable = False
def __init__(self, op: str, left: Expression, right: Expression) -> None:
super().__init__()
self.op = op
self.left = left
self.right = right
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_op_expr(self)
class ComparisonExpr(Expression):
"""Comparison expression (e.g. a < b > c < d)."""
operators = None # type: List[str]
operands = None # type: List[Expression]
# Inferred type for the operator methods (when relevant; None for 'is').
method_types = None # type: List[Optional[mypy.types.Type]]
def __init__(self, operators: List[str], operands: List[Expression]) -> None:
super().__init__()
self.operators = operators
self.operands = operands
self.method_types = []
def pairwise(self) -> Iterator[Tuple[str, Expression, Expression]]:
"""If this comparison expr is "a < b is c == d", yields the sequence
("<", a, b), ("is", b, c), ("==", c, d)
"""
for i, operator in enumerate(self.operators):
yield operator, self.operands[i], self.operands[i + 1]
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_comparison_expr(self)
class SliceExpr(Expression):
"""Slice expression (e.g. 'x:y', 'x:', '::2' or ':').
This is only valid as index in index expressions.
"""
begin_index = None # type: Optional[Expression]
end_index = None # type: Optional[Expression]
stride = None # type: Optional[Expression]
def __init__(self, begin_index: Optional[Expression],
end_index: Optional[Expression],
stride: Optional[Expression]) -> None:
super().__init__()
self.begin_index = begin_index
self.end_index = end_index
self.stride = stride
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_slice_expr(self)
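# For illustration, the index in x[1:10:2] is
# SliceExpr(begin_index=IntExpr(1), end_index=IntExpr(10), stride=IntExpr(2)),
# while x[:] uses a SliceExpr whose three components are all None.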
class CastExpr(Expression):
"""Cast expression cast(type, expr)."""
expr = None # type: Expression
type = None # type: mypy.types.Type
def __init__(self, expr: Expression, typ: 'mypy.types.Type') -> None:
super().__init__()
self.expr = expr
self.type = typ
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_cast_expr(self)
class RevealExpr(Expression):
"""Reveal type expression reveal_type(expr) or reveal_locals() expression."""
expr = None # type: Optional[Expression]
kind = 0 # type: int
local_nodes = None # type: Optional[List[Var]]
def __init__(
self, kind: int,
expr: Optional[Expression] = None,
local_nodes: 'Optional[List[Var]]' = None) -> None:
super().__init__()
self.expr = expr
self.kind = kind
self.local_nodes = local_nodes
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_reveal_expr(self)
class SuperExpr(Expression):
"""Expression super().name"""
name = ''
info = None # type: Optional[TypeInfo] # Type that contains this super expression
call = None # type: CallExpr # The expression super(...)
def __init__(self, name: str, call: CallExpr) -> None:
super().__init__()
self.name = name
self.call = call
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_super_expr(self)
class LambdaExpr(FuncItem, Expression):
"""Lambda expression"""
@property
def name(self) -> str:
return '<lambda>'
def expr(self) -> Expression:
"""Return the expression (the body) of the lambda."""
ret = cast(ReturnStmt, self.body.body[-1])
expr = ret.expr
assert expr is not None # lambda can't have empty body
return expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_lambda_expr(self)
def is_dynamic(self) -> bool:
return False
class ListExpr(Expression):
"""List literal expression [...]."""
items = None # type: List[Expression]
def __init__(self, items: List[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_list_expr(self)
class DictExpr(Expression):
"""Dictionary literal expression {key: value, ...}."""
items = None # type: List[Tuple[Optional[Expression], Expression]]
def __init__(self, items: List[Tuple[Optional[Expression], Expression]]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_dict_expr(self)
class TupleExpr(Expression):
"""Tuple literal expression (..., ...)
Also lvalue sequences (..., ...) and [..., ...]"""
items = None # type: List[Expression]
def __init__(self, items: List[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_tuple_expr(self)
class SetExpr(Expression):
"""Set literal expression {value, ...}."""
items = None # type: List[Expression]
def __init__(self, items: List[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_set_expr(self)
class GeneratorExpr(Expression):
"""Generator expression ... for ... in ... [ for ... in ... ] [ if ... ]."""
left_expr = None # type: Expression
sequences = None # type: List[Expression]
condlists = None # type: List[List[Expression]]
is_async = None # type: List[bool]
indices = None # type: List[Lvalue]
def __init__(self, left_expr: Expression, indices: List[Lvalue],
sequences: List[Expression], condlists: List[List[Expression]],
is_async: List[bool]) -> None:
super().__init__()
self.left_expr = left_expr
self.sequences = sequences
self.condlists = condlists
self.indices = indices
self.is_async = is_async
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_generator_expr(self)
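# For illustration, the generator expression
#
#     (x + y for x in a if x for y in b)
#
# has left_expr == (x + y), indices == [x, y], sequences == [a, b],
# condlists == [[x], []] (one list of 'if' conditions per 'for' clause),
# and is_async == [False, False].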
class ListComprehension(Expression):
"""List comprehension (e.g. [x + 1 for x in a])"""
generator = None # type: GeneratorExpr
def __init__(self, generator: GeneratorExpr) -> None:
super().__init__()
self.generator = generator
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_list_comprehension(self)
class SetComprehension(Expression):
"""Set comprehension (e.g. {x + 1 for x in a})"""
generator = None # type: GeneratorExpr
def __init__(self, generator: GeneratorExpr) -> None:
super().__init__()
self.generator = generator
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_set_comprehension(self)
class DictionaryComprehension(Expression):
"""Dictionary comprehension (e.g. {k: v for k, v in a}"""
key = None # type: Expression
value = None # type: Expression
sequences = None # type: List[Expression]
condlists = None # type: List[List[Expression]]
is_async = None # type: List[bool]
indices = None # type: List[Lvalue]
def __init__(self, key: Expression, value: Expression, indices: List[Lvalue],
sequences: List[Expression], condlists: List[List[Expression]],
is_async: List[bool]) -> None:
super().__init__()
self.key = key
self.value = value
self.sequences = sequences
self.condlists = condlists
self.indices = indices
self.is_async = is_async
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_dictionary_comprehension(self)
class ConditionalExpr(Expression):
"""Conditional expression (e.g. x if y else z)"""
cond = None # type: Expression
if_expr = None # type: Expression
else_expr = None # type: Expression
def __init__(self, cond: Expression, if_expr: Expression, else_expr: Expression) -> None:
super().__init__()
self.cond = cond
self.if_expr = if_expr
self.else_expr = else_expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_conditional_expr(self)
class BackquoteExpr(Expression):
"""Python 2 expression `...`."""
expr = None # type: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_backquote_expr(self)
class TypeApplication(Expression):
"""Type application expr[type, ...]"""
expr = None # type: Expression
types = None # type: List[mypy.types.Type]
def __init__(self, expr: Expression, types: List['mypy.types.Type']) -> None:
super().__init__()
self.expr = expr
self.types = types
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_application(self)
# Variance of a type variable. For example, T in the definition of
# List[T] is invariant, so List[int] is not a subtype of List[object],
# and also List[object] is not a subtype of List[int].
#
# The T in Iterable[T] is covariant, so Iterable[int] is a subtype of
# Iterable[object], but not vice versa.
#
# If T is contravariant in Foo[T], Foo[object] is a subtype of
# Foo[int], but not vice versa.
INVARIANT = 0 # type: Final[int]
COVARIANT = 1 # type: Final[int]
CONTRAVARIANT = 2 # type: Final[int]
class TypeVarLikeExpr(SymbolNode, Expression):
"""Base class for TypeVarExpr and ParamSpecExpr."""
_name = ''
_fullname = ''
# Upper bound: only subtypes of upper_bound are valid as values. By default
# this is 'object', meaning no restriction.
upper_bound = None # type: mypy.types.Type
# Variance of the type variable. Invariant is the default.
# TypeVar(..., covariant=True) defines a covariant type variable.
# TypeVar(..., contravariant=True) defines a contravariant type
# variable.
variance = INVARIANT
def __init__(
self, name: str, fullname: str, upper_bound: 'mypy.types.Type', variance: int = INVARIANT
) -> None:
super().__init__()
self._name = name
self._fullname = fullname
self.upper_bound = upper_bound
self.variance = variance
@property
def name(self) -> str:
return self._name
@property
def fullname(self) -> str:
return self._fullname
class TypeVarExpr(TypeVarLikeExpr):
"""Type variable expression TypeVar(...).
This is also used to represent type variables in symbol tables.
A type variable is not valid as a type unless bound in a TypeVarScope.
That happens within:
1. a generic class that uses the type variable as a type argument or
2. a generic function that refers to the type variable in its signature.
"""
# Value restriction: only types in the list are valid as values. If the
# list is empty, there is no restriction.
values = None # type: List[mypy.types.Type]
def __init__(self, name: str, fullname: str,
values: List['mypy.types.Type'],
upper_bound: 'mypy.types.Type',
variance: int = INVARIANT) -> None:
super().__init__(name, fullname, upper_bound, variance)
self.values = values
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_var_expr(self)
def serialize(self) -> JsonDict:
return {'.class': 'TypeVarExpr',
'name': self._name,
'fullname': self._fullname,
'values': [t.serialize() for t in self.values],
'upper_bound': self.upper_bound.serialize(),
'variance': self.variance,
}
@classmethod
def deserialize(cls, data: JsonDict) -> 'TypeVarExpr':
assert data['.class'] == 'TypeVarExpr'
return TypeVarExpr(data['name'],
data['fullname'],
[mypy.types.deserialize_type(v) for v in data['values']],
mypy.types.deserialize_type(data['upper_bound']),
data['variance'])
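# For illustration, "T = TypeVar('T', int, str)" is stored as a TypeVarExpr
# with values == [int, str], upper_bound == object and variance == INVARIANT,
# while "S = TypeVar('S', bound=Sized, covariant=True)" would have
# values == [], upper_bound == Sized and variance == COVARIANT.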
class ParamSpecExpr(TypeVarLikeExpr):
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_paramspec_expr(self)
def serialize(self) -> JsonDict:
return {
'.class': 'ParamSpecExpr',
'name': self._name,
'fullname': self._fullname,
'upper_bound': self.upper_bound.serialize(),
'variance': self.variance,
}
@classmethod
def deserialize(cls, data: JsonDict) -> 'ParamSpecExpr':
assert data['.class'] == 'ParamSpecExpr'
return ParamSpecExpr(
data['name'],
data['fullname'],
mypy.types.deserialize_type(data['upper_bound']),
data['variance']
)
class TypeAliasExpr(Expression):
"""Type alias expression (rvalue)."""
# The target type.
type = None # type: mypy.types.Type
# Names of unbound type variables used to define the alias
tvars = None # type: List[str]
# Whether this alias was defined in bare form. Used to distinguish
# between
# A = List
# and
# A = List[Any]
no_args = False # type: bool
def __init__(self, node: 'TypeAlias') -> None:
super().__init__()
self.type = node.target
self.tvars = node.alias_tvars
self.no_args = node.no_args
self.node = node
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_alias_expr(self)
class NamedTupleExpr(Expression):
"""Named tuple expression namedtuple(...) or NamedTuple(...)."""
# The class representation of this named tuple (its tuple_type attribute contains
# the tuple item types)
info = None # type: TypeInfo
is_typed = False # whether this class was created with typing.NamedTuple
def __init__(self, info: 'TypeInfo', is_typed: bool = False) -> None:
super().__init__()
self.info = info
self.is_typed = is_typed
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_namedtuple_expr(self)
class TypedDictExpr(Expression):
"""Typed dict expression TypedDict(...)."""
# The class representation of this typed dict
info = None # type: TypeInfo
def __init__(self, info: 'TypeInfo') -> None:
super().__init__()
self.info = info
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_typeddict_expr(self)
class EnumCallExpr(Expression):
"""Named tuple expression Enum('name', 'val1 val2 ...')."""
# The class representation of this enumerated type
info = None # type: TypeInfo
# The item names (for debugging)
items = None # type: List[str]
values = None # type: List[Optional[Expression]]
def __init__(self, info: 'TypeInfo', items: List[str],
values: List[Optional[Expression]]) -> None:
super().__init__()
self.info = info
self.items = items
self.values = values
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_enum_call_expr(self)
class PromoteExpr(Expression):
"""Ducktype class decorator expression _promote(...)."""
type = None # type: mypy.types.Type
def __init__(self, type: 'mypy.types.Type') -> None:
super().__init__()
self.type = type
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit__promote_expr(self)
class NewTypeExpr(Expression):
"""NewType expression NewType(...)."""
name = None # type: str
# The base type (the second argument to NewType)
old_type = None # type: Optional[mypy.types.Type]
# The synthesized class representing the new type (inherits old_type)
info = None # type: Optional[TypeInfo]
def __init__(self, name: str, old_type: 'Optional[mypy.types.Type]', line: int,
column: int) -> None:
super().__init__()
self.name = name
self.old_type = old_type
self.line = line
self.column = column
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_newtype_expr(self)
class AwaitExpr(Expression):
"""Await expression (await ...)."""
expr = None # type: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_await_expr(self)
# Constants
class TempNode(Expression):
"""Temporary dummy node used during type checking.
This node is not present in the original program; it is just an artifact
of the type checker implementation. It only represents an opaque node with
some fixed type.
"""
type = None # type: mypy.types.Type
# Is this TempNode used to indicate absence of a right hand side in an annotated assignment?
# (e.g. for 'x: int' the rvalue is TempNode(AnyType(TypeOfAny.special_form), no_rhs=True))
no_rhs = False # type: bool
def __init__(self,
typ: 'mypy.types.Type',
no_rhs: bool = False,
*,
context: Optional[Context] = None) -> None:
"""Construct a dummy node; optionally borrow line/column from context object."""
super().__init__()
self.type = typ
self.no_rhs = no_rhs
if context is not None:
self.line = context.line
self.column = context.column
def __repr__(self) -> str:
return 'TempNode:%d(%s)' % (self.line, str(self.type))
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_temp_node(self)
class TypeInfo(SymbolNode):
"""The type structure of a single class.
Each TypeInfo corresponds one-to-one to a ClassDef, which
represents the AST of the class.
In type-theory terms, this is a "type constructor", and if the
class is generic then it will be a type constructor of higher kind.
Where the class is used in an actual type, it's in the form of an
Instance, which amounts to a type application of the tycon to
the appropriate number of arguments.
"""
_fullname = None # type: Bogus[str] # Fully qualified name
# Fully qualified name for the module this type was defined in. This
# information is also in the fullname, but is harder to extract in the
# case of nested class definitions.
module_name = None # type: str
defn = None # type: ClassDef # Corresponding ClassDef
# Method Resolution Order: the order of looking up attributes. The first
    # value always refers to this class.
mro = None # type: List[TypeInfo]
# Used to stash the names of the mro classes temporarily between
# deserialization and fixup. See deserialize() for why.
_mro_refs = None # type: Optional[List[str]]
bad_mro = False # Could not construct full MRO
declared_metaclass = None # type: Optional[mypy.types.Instance]
metaclass_type = None # type: Optional[mypy.types.Instance]
names = None # type: SymbolTable # Names defined directly in this type
is_abstract = False # Does the class have any abstract attributes?
is_protocol = False # Is this a protocol class?
runtime_protocol = False # Does this protocol support isinstance checks?
abstract_attributes = None # type: List[str]
# The attributes 'assuming' and 'assuming_proper' represent structural subtype matrices.
#
# In languages with structural subtyping, one can keep a global subtype matrix like this:
# . A B C .
# A 1 0 0
# B 1 1 1
# C 1 0 1
# .
# where 1 indicates that the type in corresponding row is a subtype of the type
# in corresponding column. This matrix typically starts filled with all 1's and
# a typechecker tries to "disprove" every subtyping relation using atomic (or nominal) types.
# However, we don't want to keep this huge global state. Instead, we keep the subtype
# information in the form of list of pairs (subtype, supertype) shared by all 'Instance's
# with given supertype's TypeInfo. When we enter a subtype check we push a pair in this list
    # thus assuming that we started with 1 in the corresponding matrix element. Such an algorithm
    # allows us to treat recursive and mutually recursive protocols and other kinds of complex situations.
#
    # If concurrent/parallel type checking is added in the future,
# then there should be one matrix per thread/process to avoid false negatives
# during the type checking phase.
assuming = None # type: List[Tuple[mypy.types.Instance, mypy.types.Instance]]
assuming_proper = None # type: List[Tuple[mypy.types.Instance, mypy.types.Instance]]
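    # Illustrative sketch (added for clarity, not part of mypy itself) of how a
    # structural subtype check typically uses this stack; the helper names below
    # are hypothetical:
    #
    #     def is_protocol_subtype(left, right):          # both mypy.types.Instance
    #         if any(mypy.sametypes.is_same_type(l, left) and
    #                mypy.sametypes.is_same_type(r, right)
    #                for l, r in right.type.assuming):
    #             return True                            # already assumed: cut the recursion
    #         right.type.assuming.append((left, right))  # "fill in a 1" in the matrix
    #         try:
    #             return members_structurally_compatible(left, right)
    #         finally:
    #             right.type.assuming.pop()              # restore on the way out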
# Ditto for temporary 'inferring' stack of recursive constraint inference.
# It contains Instance's of protocol types that appeared as an argument to
# constraints.infer_constraints(). We need 'inferring' to avoid infinite recursion for
# recursive and mutually recursive protocols.
#
    # We make 'assuming' and 'inferring' attributes here instead of passing them as kwargs,
    # since this would require passing them through many dozens of calls. In particular,
# there is a dependency infer_constraint -> is_subtype -> is_callable_subtype ->
# -> infer_constraints.
inferring = None # type: List[mypy.types.Instance]
# 'inferring' and 'assuming' can't be made sets, since we need to use
# is_same_type to correctly treat unions.
# Classes inheriting from Enum shadow their true members with a __getattr__, so we
# have to treat them as a special case.
is_enum = False
# If true, any unknown attributes should have type 'Any' instead
# of generating a type error. This would be true if there is a
# base class with type 'Any', but other use cases may be
# possible. This is similar to having __getattr__ that returns Any
# (and __setattr__), but without the __getattr__ method.
fallback_to_any = False
# Information related to type annotations.
# Generic type variable names (full names)
type_vars = None # type: List[str]
# Direct base classes.
bases = None # type: List[mypy.types.Instance]
# Another type which this type will be treated as a subtype of,
# even though it's not a subclass in Python. The non-standard
# `@_promote` decorator introduces this, and there are also
# several builtin examples, in particular `int` -> `float`.
_promote = None # type: Optional[mypy.types.Type]
# Representation of a Tuple[...] base class, if the class has any
# (e.g., for named tuples). If this is not None, the actual Type
# object used for this class is not an Instance but a TupleType;
# the corresponding Instance is set as the fallback type of the
# tuple type.
tuple_type = None # type: Optional[mypy.types.TupleType]
# Is this a named tuple type?
is_named_tuple = False
# If this class is defined by the TypedDict type constructor,
# then this is not None.
typeddict_type = None # type: Optional[mypy.types.TypedDictType]
# Is this a newtype type?
is_newtype = False
# Is this a synthesized intersection type?
is_intersection = False
# This is a dictionary that will be serialized and un-serialized as is.
# It is useful for plugins to add their data to save in the cache.
metadata = None # type: Dict[str, JsonDict]
FLAGS = [
'is_abstract', 'is_enum', 'fallback_to_any', 'is_named_tuple',
'is_newtype', 'is_protocol', 'runtime_protocol', 'is_final',
'is_intersection',
] # type: Final[List[str]]
def __init__(self, names: 'SymbolTable', defn: ClassDef, module_name: str) -> None:
"""Initialize a TypeInfo."""
super().__init__()
self.names = names
self.defn = defn
self.module_name = module_name
self.type_vars = []
self.bases = []
self.mro = []
self._fullname = defn.fullname
self.is_abstract = False
self.abstract_attributes = []
self.assuming = []
self.assuming_proper = []
self.inferring = []
self.add_type_vars()
self.metadata = {}
self.is_final = False
def add_type_vars(self) -> None:
if self.defn.type_vars:
for vd in self.defn.type_vars:
self.type_vars.append(vd.fullname)
@property
def name(self) -> str:
"""Short name."""
return self.defn.name
@property
def fullname(self) -> Bogus[str]:
return self._fullname
def is_generic(self) -> bool:
"""Is the type generic (i.e. does it have type variables)?"""
return len(self.type_vars) > 0
def get(self, name: str) -> 'Optional[SymbolTableNode]':
for cls in self.mro:
n = cls.names.get(name)
if n:
return n
return None
def get_containing_type_info(self, name: str) -> 'Optional[TypeInfo]':
for cls in self.mro:
if name in cls.names:
return cls
return None
@property
def protocol_members(self) -> List[str]:
# Protocol members are names of all attributes/methods defined in a protocol
# and in all its supertypes (except for 'object').
members = set() # type: Set[str]
assert self.mro, "This property can be only accessed after MRO is (re-)calculated"
for base in self.mro[:-1]: # we skip "object" since everyone implements it
if base.is_protocol:
for name in base.names:
members.add(name)
return sorted(list(members))
def __getitem__(self, name: str) -> 'SymbolTableNode':
n = self.get(name)
if n:
return n
else:
raise KeyError(name)
def __repr__(self) -> str:
return '<TypeInfo %s>' % self.fullname
def __bool__(self) -> bool:
# We defined this here instead of just overriding it in
# FakeInfo so that mypyc can generate a direct call instead of
# using the generic bool handling.
return not isinstance(self, FakeInfo)
def has_readable_member(self, name: str) -> bool:
return self.get(name) is not None
def get_method(self, name: str) -> Optional[FuncBase]:
for cls in self.mro:
if name in cls.names:
node = cls.names[name].node
if isinstance(node, FuncBase):
return node
else:
return None
return None
def calculate_metaclass_type(self) -> 'Optional[mypy.types.Instance]':
declared = self.declared_metaclass
if declared is not None and not declared.type.has_base('builtins.type'):
return declared
if self._fullname == 'builtins.type':
return mypy.types.Instance(self, [])
candidates = [s.declared_metaclass
for s in self.mro
if s.declared_metaclass is not None
and s.declared_metaclass.type is not None]
for c in candidates:
if all(other.type in c.type.mro for other in candidates):
return c
return None
def is_metaclass(self) -> bool:
return (self.has_base('builtins.type') or self.fullname == 'abc.ABCMeta' or
self.fallback_to_any)
def has_base(self, fullname: str) -> bool:
"""Return True if type has a base type with the specified name.
This can be either via extension or via implementation.
"""
for cls in self.mro:
if cls.fullname == fullname:
return True
return False
def direct_base_classes(self) -> 'List[TypeInfo]':
"""Return a direct base classes.
Omit base classes of other base classes.
"""
return [base.type for base in self.bases]
def __str__(self) -> str:
"""Return a string representation of the type.
This includes the most important information about the type.
"""
return self.dump()
def dump(self,
str_conv: 'Optional[mypy.strconv.StrConv]' = None,
type_str_conv: 'Optional[mypy.types.TypeStrVisitor]' = None) -> str:
"""Return a string dump of the contents of the TypeInfo."""
if not str_conv:
str_conv = mypy.strconv.StrConv()
base = '' # type: str
def type_str(typ: 'mypy.types.Type') -> str:
if type_str_conv:
return typ.accept(type_str_conv)
return str(typ)
head = 'TypeInfo' + str_conv.format_id(self)
if self.bases:
base = 'Bases({})'.format(', '.join(type_str(base)
for base in self.bases))
mro = 'Mro({})'.format(', '.join(item.fullname + str_conv.format_id(item)
for item in self.mro))
names = []
for name in sorted(self.names):
description = name + str_conv.format_id(self.names[name].node)
node = self.names[name].node
if isinstance(node, Var) and node.type:
description += ' ({})'.format(type_str(node.type))
names.append(description)
items = [
'Name({})'.format(self.fullname),
base,
mro,
('Names', names),
]
if self.declared_metaclass:
items.append('DeclaredMetaclass({})'.format(type_str(self.declared_metaclass)))
if self.metaclass_type:
items.append('MetaclassType({})'.format(type_str(self.metaclass_type)))
return mypy.strconv.dump_tagged(
items,
head,
str_conv=str_conv)
def serialize(self) -> JsonDict:
# NOTE: This is where all ClassDefs originate, so there shouldn't be duplicates.
data = {'.class': 'TypeInfo',
'module_name': self.module_name,
'fullname': self.fullname,
'names': self.names.serialize(self.fullname),
'defn': self.defn.serialize(),
'abstract_attributes': self.abstract_attributes,
'type_vars': self.type_vars,
'bases': [b.serialize() for b in self.bases],
'mro': [c.fullname for c in self.mro],
'_promote': None if self._promote is None else self._promote.serialize(),
'declared_metaclass': (None if self.declared_metaclass is None
else self.declared_metaclass.serialize()),
'metaclass_type':
None if self.metaclass_type is None else self.metaclass_type.serialize(),
'tuple_type': None if self.tuple_type is None else self.tuple_type.serialize(),
'typeddict_type':
None if self.typeddict_type is None else self.typeddict_type.serialize(),
'flags': get_flags(self, TypeInfo.FLAGS),
'metadata': self.metadata,
}
return data
@classmethod
def deserialize(cls, data: JsonDict) -> 'TypeInfo':
names = SymbolTable.deserialize(data['names'])
defn = ClassDef.deserialize(data['defn'])
module_name = data['module_name']
ti = TypeInfo(names, defn, module_name)
ti._fullname = data['fullname']
# TODO: Is there a reason to reconstruct ti.subtypes?
ti.abstract_attributes = data['abstract_attributes']
ti.type_vars = data['type_vars']
ti.bases = [mypy.types.Instance.deserialize(b) for b in data['bases']]
ti._promote = (None if data['_promote'] is None
else mypy.types.deserialize_type(data['_promote']))
ti.declared_metaclass = (None if data['declared_metaclass'] is None
else mypy.types.Instance.deserialize(data['declared_metaclass']))
ti.metaclass_type = (None if data['metaclass_type'] is None
else mypy.types.Instance.deserialize(data['metaclass_type']))
# NOTE: ti.mro will be set in the fixup phase based on these
# names. The reason we need to store the mro instead of just
# recomputing it from base classes has to do with a subtle
# point about fine-grained incremental: the cache files might
# not be loaded until after a class in the mro has changed its
# bases, which causes the mro to change. If we recomputed our
# mro, we would compute the *new* mro, which leaves us with no
# way to detect that the mro has changed! Thus we need to make
# sure to load the original mro so that once the class is
# rechecked, it can tell that the mro has changed.
ti._mro_refs = data['mro']
ti.tuple_type = (None if data['tuple_type'] is None
else mypy.types.TupleType.deserialize(data['tuple_type']))
ti.typeddict_type = (None if data['typeddict_type'] is None
else mypy.types.TypedDictType.deserialize(data['typeddict_type']))
ti.metadata = data['metadata']
set_flags(ti, data['flags'])
return ti
class FakeInfo(TypeInfo):
# types.py defines a single instance of this class, called types.NOT_READY.
# This instance is used as a temporary placeholder in the process of de-serialization
# of 'Instance' types. The de-serialization happens in two steps: In the first step,
# Instance.type is set to NOT_READY. In the second step (in fixup.py) it is replaced by
# an actual TypeInfo. If you see the assertion error below, then most probably something
# went wrong during the second step and an 'Instance' that raised this error was not fixed.
# Note:
# 'None' is not used as a dummy value for two reasons:
# 1. This will require around 80-100 asserts to make 'mypy --strict-optional mypy'
# pass cleanly.
# 2. If NOT_READY value is accidentally used somewhere, it will be obvious where the value
# is from, whereas a 'None' value could come from anywhere.
#
# Additionally, this serves as a more general-purpose placeholder
# for missing TypeInfos in a number of places where the excuses
# for not being Optional are a little weaker.
#
# TypeInfo defines a __bool__ method that returns False for FakeInfo
# so that it can be conveniently tested against in the same way that it
# would be if things were properly optional.
def __init__(self, msg: str) -> None:
self.msg = msg
def __getattribute__(self, attr: str) -> None:
# Handle __class__ so that isinstance still works...
if attr == '__class__':
return object.__getattribute__(self, attr)
raise AssertionError(object.__getattribute__(self, 'msg'))
VAR_NO_INFO = FakeInfo('Var is lacking info') # type: Final[TypeInfo]
CLASSDEF_NO_INFO = FakeInfo('ClassDef is lacking info') # type: Final[TypeInfo]
FUNC_NO_INFO = FakeInfo('FuncBase for non-methods lack info') # type: Final[TypeInfo]
class TypeAlias(SymbolNode):
"""
A symbol node representing a type alias.
Type alias is a static concept, in contrast to variables with types
like Type[...]. Namely:
* type aliases
- can be used in type context (annotations)
- cannot be re-assigned
* variables with type Type[...]
- cannot be used in type context
- but can be re-assigned
An alias can be defined only by an assignment to a name (not any other lvalues).
Such assignment defines an alias by default. To define a variable,
an explicit Type[...] annotation is required. As an exception,
    at non-global scope a non-subscripted rvalue creates a variable even without
an annotation. This exception exists to accommodate the common use case of
class-valued attributes. See SemanticAnalyzerPass2.check_and_set_up_type_alias
for details.
Aliases can be generic. Currently, mypy uses unbound type variables for
generic aliases and identifies them by name. Essentially, type aliases
work as macros that expand textually. The definition and expansion rules are
    as follows:
    1. An alias targeting a generic class without explicit variables acts as
the given class (this doesn't apply to Tuple and Callable, which are not proper
classes but special type constructors):
A = List
AA = List[Any]
x: A # same as List[Any]
x: A[int] # same as List[int]
x: AA # same as List[Any]
x: AA[int] # Error!
C = Callable # Same as Callable[..., Any]
T = Tuple # Same as Tuple[Any, ...]
2. An alias using explicit type variables in its rvalue expects
replacements (type arguments) for these variables. If missing, they
are treated as Any, like for other generics:
B = List[Tuple[T, T]]
x: B # same as List[Tuple[Any, Any]]
x: B[int] # same as List[Tuple[int, int]]
def f(x: B[T]) -> T: ... # without T, Any would be used here
    3. An alias can be defined using other aliases. In the definition
rvalue the Any substitution doesn't happen for top level unsubscripted
generic classes:
A = List
B = A # here A is expanded to List, _not_ List[Any],
# to match the Python runtime behaviour
x: B[int] # same as List[int]
C = List[A] # this expands to List[List[Any]]
AA = List[T]
D = AA # here AA expands to List[Any]
x: D[int] # Error!
Note: the fact that we support aliases like `A = List` means that the target
    type will initially be an instance type with the wrong number of type arguments.
    Such instances are all fixed in the third pass of semantic analysis.
We therefore store the difference between `List` and `List[Any]` rvalues (targets)
using the `no_args` flag. See also TypeAliasExpr.no_args.
Meaning of other fields:
target: The target type. For generic aliases contains unbound type variables
as nested types.
_fullname: Qualified name of this type alias. This is used in particular
to track fine grained dependencies from aliases.
alias_tvars: Names of unbound type variables used to define this alias.
normalized: Used to distinguish between `A = List`, and `A = list`. Both
are internally stored using `builtins.list` (because `typing.List` is
itself an alias), while the second cannot be subscripted because of
Python runtime limitation.
    line and column: Line and column of the original alias definition.
"""
__slots__ = ('target', '_fullname', 'alias_tvars', 'no_args', 'normalized',
'line', 'column', '_is_recursive')
def __init__(self, target: 'mypy.types.Type', fullname: str, line: int, column: int,
*,
alias_tvars: Optional[List[str]] = None,
no_args: bool = False,
normalized: bool = False) -> None:
self._fullname = fullname
self.target = target
if alias_tvars is None:
alias_tvars = []
self.alias_tvars = alias_tvars
self.no_args = no_args
self.normalized = normalized
# This attribute is manipulated by TypeAliasType. If non-None,
# it is the cached value.
self._is_recursive = None # type: Optional[bool]
super().__init__(line, column)
@property
def name(self) -> str:
return self._fullname.split('.')[-1]
@property
def fullname(self) -> str:
return self._fullname
def serialize(self) -> JsonDict:
data = {'.class': 'TypeAlias',
'fullname': self._fullname,
'target': self.target.serialize(),
'alias_tvars': self.alias_tvars,
'no_args': self.no_args,
'normalized': self.normalized,
'line': self.line,
'column': self.column
} # type: JsonDict
return data
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_type_alias(self)
@classmethod
def deserialize(cls, data: JsonDict) -> 'TypeAlias':
assert data['.class'] == 'TypeAlias'
fullname = data['fullname']
alias_tvars = data['alias_tvars']
target = mypy.types.deserialize_type(data['target'])
no_args = data['no_args']
normalized = data['normalized']
line = data['line']
column = data['column']
return cls(target, fullname, line, column, alias_tvars=alias_tvars,
no_args=no_args, normalized=normalized)
class PlaceholderNode(SymbolNode):
"""Temporary symbol node that will later become a real SymbolNode.
These are only present during semantic analysis when using the new
semantic analyzer. These are created if some essential dependencies
of a definition are not yet complete.
A typical use is for names imported from a module which is still
incomplete (within an import cycle):
from m import f # Initially may create PlaceholderNode
    This is particularly important if the imported name shadows a name from
an enclosing scope or builtins:
from m import int # Placeholder avoids mixups with builtins.int
Another case where this is useful is when there is another definition
or assignment:
from m import f
def f() -> None: ...
In the above example, the presence of PlaceholderNode allows us to
handle the second definition as a redefinition.
They are also used to create PlaceholderType instances for types
that refer to incomplete types. Example:
class C(Sequence[C]): ...
We create a PlaceholderNode (with becomes_typeinfo=True) for C so
that the type C in Sequence[C] can be bound.
Attributes:
        fullname: Full name of the PlaceholderNode.
node: AST node that contains the definition that caused this to
be created. This is useful for tracking order of incomplete definitions
and for debugging.
        becomes_typeinfo: If True, this refers to something that could later
become a TypeInfo. It can't be used with type variables, in
particular, as this would cause issues with class type variable
detection.
The long-term purpose of placeholder nodes/types is to evolve into
something that can support general recursive types.
"""
def __init__(self, fullname: str, node: Node, line: int, *,
becomes_typeinfo: bool = False) -> None:
self._fullname = fullname
self.node = node
self.becomes_typeinfo = becomes_typeinfo
self.line = line
@property
def name(self) -> str:
return self._fullname.split('.')[-1]
@property
def fullname(self) -> str:
return self._fullname
def serialize(self) -> JsonDict:
assert False, "PlaceholderNode can't be serialized"
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_placeholder_node(self)
class SymbolTableNode:
"""Description of a name binding in a symbol table.
These are only used as values in module (global), function (local)
and class symbol tables (see SymbolTable). The name that is bound is
the key in SymbolTable.
Symbol tables don't contain direct references to AST nodes primarily
because there can be multiple symbol table references to a single
AST node (due to imports and aliases), and different references can
behave differently. This class describes the unique properties of
each reference.
The most fundamental attribute is 'node', which is the AST node that
the name refers to.
The kind is usually one of LDEF, GDEF or MDEF, depending on the scope
of the definition. These three kinds can usually be used
interchangeably and the difference between local, global and class
scopes is mostly descriptive, with no semantic significance.
However, some tools that consume mypy ASTs may care about these so
they should be correct.
Attributes:
node: AST node of definition. Among others, this can be one of
FuncDef, Var, TypeInfo, TypeVarExpr or MypyFile -- or None
for cross_ref that hasn't been fixed up yet.
kind: Kind of node. Possible values:
- LDEF: local definition
- GDEF: global (module-level) definition
- MDEF: class member definition
- UNBOUND_IMPORTED: temporary kind for imported names (we
don't know the final kind yet)
module_public: If False, this name won't be imported via
'from <module> import *'. This has no effect on names within
classes.
module_hidden: If True, the name will be never exported (needed for
stub files)
cross_ref: For deserialized MypyFile nodes, the referenced module
name; for other nodes, optionally the name of the referenced object.
implicit: Was this defined by assignment to self attribute?
plugin_generated: Was this symbol generated by a plugin?
(And therefore needs to be removed in aststrip.)
no_serialize: Do not serialize this node if True. This is used to prevent
keys in the cache that refer to modules on which this file does not
depend. Currently this can happen if there is a module not in build
used e.g. like this:
import a.b.c # type: ignore
This will add a submodule symbol to parent module `a` symbol table,
but `a.b` is _not_ added as its dependency. Therefore, we should
not serialize these symbols as they may not be found during fixup
phase, instead they will be re-added during subsequent patch parents
phase.
TODO: Refactor build.py to make dependency tracking more transparent
and/or refactor look-up functions to not require parent patching.
NOTE: No other attributes should be added to this class unless they
are shared by all node kinds.
"""
__slots__ = ('kind',
'node',
'module_public',
'module_hidden',
'cross_ref',
'implicit',
'plugin_generated',
'no_serialize',
)
def __init__(self,
kind: int,
node: Optional[SymbolNode],
module_public: bool = True,
implicit: bool = False,
module_hidden: bool = False,
*,
plugin_generated: bool = False,
no_serialize: bool = False) -> None:
self.kind = kind
self.node = node
self.module_public = module_public
self.implicit = implicit
self.module_hidden = module_hidden
self.cross_ref = None # type: Optional[str]
self.plugin_generated = plugin_generated
self.no_serialize = no_serialize
@property
def fullname(self) -> Optional[str]:
if self.node is not None:
return self.node.fullname
else:
return None
@property
def type(self) -> 'Optional[mypy.types.Type]':
node = self.node
if isinstance(node, (Var, SYMBOL_FUNCBASE_TYPES)) and node.type is not None:
return node.type
elif isinstance(node, Decorator):
return node.var.type
else:
return None
def copy(self) -> 'SymbolTableNode':
new = SymbolTableNode(self.kind,
self.node,
self.module_public,
self.implicit,
self.module_hidden)
new.cross_ref = self.cross_ref
return new
def __str__(self) -> str:
s = '{}/{}'.format(node_kinds[self.kind], short_type(self.node))
if isinstance(self.node, SymbolNode):
s += ' ({})'.format(self.node.fullname)
# Include declared type of variables and functions.
if self.type is not None:
s += ' : {}'.format(self.type)
return s
def serialize(self, prefix: str, name: str) -> JsonDict:
"""Serialize a SymbolTableNode.
Args:
prefix: full name of the containing module or class; or None
name: name of this object relative to the containing object
"""
data = {'.class': 'SymbolTableNode',
'kind': node_kinds[self.kind],
} # type: JsonDict
if self.module_hidden:
data['module_hidden'] = True
if not self.module_public:
data['module_public'] = False
if self.implicit:
data['implicit'] = True
if self.plugin_generated:
data['plugin_generated'] = True
if isinstance(self.node, MypyFile):
data['cross_ref'] = self.node.fullname
else:
assert self.node is not None, '%s:%s' % (prefix, name)
if prefix is not None:
fullname = self.node.fullname
if (fullname is not None and '.' in fullname
and fullname != prefix + '.' + name
and not (isinstance(self.node, Var)
and self.node.from_module_getattr)):
assert not isinstance(self.node, PlaceholderNode)
data['cross_ref'] = fullname
return data
data['node'] = self.node.serialize()
return data
@classmethod
def deserialize(cls, data: JsonDict) -> 'SymbolTableNode':
assert data['.class'] == 'SymbolTableNode'
kind = inverse_node_kinds[data['kind']]
if 'cross_ref' in data:
# This will be fixed up later.
stnode = SymbolTableNode(kind, None)
stnode.cross_ref = data['cross_ref']
else:
assert 'node' in data, data
node = SymbolNode.deserialize(data['node'])
stnode = SymbolTableNode(kind, node)
if 'module_hidden' in data:
stnode.module_hidden = data['module_hidden']
if 'module_public' in data:
stnode.module_public = data['module_public']
if 'implicit' in data:
stnode.implicit = data['implicit']
if 'plugin_generated' in data:
stnode.plugin_generated = data['plugin_generated']
return stnode
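# Illustrative round-trip sketch (added for clarity; not part of the original
# module). It assumes the Var constructor and the GDEF constant defined earlier
# in this file:
def _example_symbol_table_node_roundtrip() -> None:
    var = Var('x')
    var._fullname = 'pkg.mod.x'
    original = SymbolTableNode(GDEF, var)
    data = original.serialize('pkg.mod', 'x')  # fullname matches prefix+name, so no cross_ref
    restored = SymbolTableNode.deserialize(data)
    assert restored.kind == GDEF
    assert isinstance(restored.node, Var) and restored.node.name == 'x'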
class SymbolTable(Dict[str, SymbolTableNode]):
"""Static representation of a namespace dictionary.
This is used for module, class and function namespaces.
"""
def __str__(self) -> str:
a = [] # type: List[str]
for key, value in self.items():
# Filter out the implicit import of builtins.
if isinstance(value, SymbolTableNode):
if (value.fullname != 'builtins' and
(value.fullname or '').split('.')[-1] not in
implicit_module_attrs):
a.append(' ' + str(key) + ' : ' + str(value))
else:
a.append(' <invalid item>')
a = sorted(a)
a.insert(0, 'SymbolTable(')
a[-1] += ')'
return '\n'.join(a)
def copy(self) -> 'SymbolTable':
return SymbolTable([(key, node.copy())
for key, node in self.items()])
def serialize(self, fullname: str) -> JsonDict:
data = {'.class': 'SymbolTable'} # type: JsonDict
for key, value in self.items():
# Skip __builtins__: it's a reference to the builtins
# module that gets added to every module by
# SemanticAnalyzerPass2.visit_file(), but it shouldn't be
# accessed by users of the module.
if key == '__builtins__' or value.no_serialize:
continue
data[key] = value.serialize(fullname, key)
return data
@classmethod
def deserialize(cls, data: JsonDict) -> 'SymbolTable':
assert data['.class'] == 'SymbolTable'
st = SymbolTable()
for key, value in data.items():
if key != '.class':
st[key] = SymbolTableNode.deserialize(value)
return st
def get_flags(node: Node, names: List[str]) -> List[str]:
return [name for name in names if getattr(node, name)]
def set_flags(node: Node, flags: List[str]) -> None:
for name in flags:
setattr(node, name, True)
def get_member_expr_fullname(expr: MemberExpr) -> Optional[str]:
"""Return the qualified name representation of a member expression.
Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
argument cannot be represented in this form.
"""
initial = None # type: Optional[str]
if isinstance(expr.expr, NameExpr):
initial = expr.expr.name
elif isinstance(expr.expr, MemberExpr):
initial = get_member_expr_fullname(expr.expr)
else:
return None
return '{}.{}'.format(initial, expr.name)
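# Small usage sketch (added for illustration): for a nested attribute access
# such as ``foo.bar.baz`` the helper above recovers the dotted name. NameExpr
# and MemberExpr are the expression classes defined earlier in this file.
def _example_member_expr_fullname() -> None:
    expr = MemberExpr(MemberExpr(NameExpr('foo'), 'bar'), 'baz')
    assert get_member_expr_fullname(expr) == 'foo.bar.baz'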
deserialize_map = {
key: obj.deserialize
for key, obj in globals().items()
if type(obj) is not FakeInfo
and isinstance(obj, type) and issubclass(obj, SymbolNode) and obj is not SymbolNode
} # type: Final
def check_arg_kinds(arg_kinds: List[int], nodes: List[T], fail: Callable[[str, T], None]) -> None:
is_var_arg = False
is_kw_arg = False
seen_named = False
seen_opt = False
for kind, node in zip(arg_kinds, nodes):
if kind == ARG_POS:
if is_var_arg or is_kw_arg or seen_named or seen_opt:
fail("Required positional args may not appear "
"after default, named or var args",
node)
break
elif kind == ARG_OPT:
if is_var_arg or is_kw_arg or seen_named:
fail("Positional default args may not appear after named or var args", node)
break
seen_opt = True
elif kind == ARG_STAR:
if is_var_arg or is_kw_arg or seen_named:
fail("Var args may not appear after named or var args", node)
break
is_var_arg = True
elif kind == ARG_NAMED or kind == ARG_NAMED_OPT:
seen_named = True
if is_kw_arg:
fail("A **kwargs argument must be the last argument", node)
break
elif kind == ARG_STAR2:
if is_kw_arg:
fail("You may only have one **kwargs argument", node)
break
is_kw_arg = True
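# Usage sketch (added for illustration; ARG_POS/ARG_OPT are the argument-kind
# constants defined earlier in this module):
def _example_check_arg_kinds() -> None:
    errors = []  # type: List[str]
    check_arg_kinds([ARG_OPT, ARG_POS], ['ctx1', 'ctx2'],
                    lambda msg, ctx: errors.append(msg))
    assert errors  # a required positional arg after a defaulted one is rejected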
def check_arg_names(names: Sequence[Optional[str]], nodes: List[T], fail: Callable[[str, T], None],
description: str = 'function definition') -> None:
seen_names = set() # type: Set[Optional[str]]
for name, node in zip(names, nodes):
if name is not None and name in seen_names:
fail("Duplicate argument '{}' in {}".format(name, description), node)
break
seen_names.add(name)
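# Companion sketch for check_arg_names (added for illustration):
def _example_check_arg_names() -> None:
    errors = []  # type: List[str]
    check_arg_names(['x', 'y', 'x'], ['c1', 'c2', 'c3'],
                    lambda msg, ctx: errors.append(msg))
    assert errors == ["Duplicate argument 'x' in function definition"]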
def is_class_var(expr: NameExpr) -> bool:
"""Return whether the expression is ClassVar[...]"""
if isinstance(expr.node, Var):
return expr.node.is_classvar
return False
def is_final_node(node: Optional[SymbolNode]) -> bool:
"""Check whether `node` corresponds to a final attribute."""
return isinstance(node, (Var, FuncDef, OverloadedFuncDef, Decorator)) and node.is_final
def local_definitions(names: SymbolTable,
name_prefix: str,
info: Optional[TypeInfo] = None) -> Iterator[Definition]:
"""Iterate over local definitions (not imported) in a symbol table.
Recursively iterate over class members and nested classes.
"""
# TODO: What should the name be? Or maybe remove it?
for name, symnode in names.items():
shortname = name
if '-redef' in name:
# Restore original name from mangled name of multiply defined function
shortname = name.split('-redef')[0]
fullname = name_prefix + '.' + shortname
node = symnode.node
if node and node.fullname == fullname:
yield fullname, symnode, info
if isinstance(node, TypeInfo):
yield from local_definitions(node.names, fullname, node)
| 35.327284 | 99 | 0.618926 |
14f101791d2fe3da76d97db64c0d8c720765b1e2 | 237 | py | Python | config/app_config.py | Juyeon125/Multi_pred | 048986428434d9927989f86e93ea6fa6ce33d0ae | [
"MIT"
] | 1 | 2020-10-15T08:05:41.000Z | 2020-10-15T08:05:41.000Z | config/app_config.py | Juyeon125/Multi_pred | 048986428434d9927989f86e93ea6fa6ce33d0ae | [
"MIT"
] | null | null | null | config/app_config.py | Juyeon125/Multi_pred | 048986428434d9927989f86e93ea6fa6ce33d0ae | [
"MIT"
] | null | null | null | import os
class LocalLevelConfig:
ENV = "development"
DEBUG = True
SECRET_KEY = "ce7ea57bcec4ea045191c43a"
class ProductionLevelConfig:
ENV = "production"
DEBUG = False
SECRET_KEY = "5ae9fc9c349602a1111ef1d4"
| 16.928571 | 43 | 0.71308 |
5a65d2ad4d0faf195a86a3cd8d1a394318396e23 | 354 | py | Python | visual_novel/visual_novel/settings/local.py | dolamroth/visual_novel | c67379df395561b3bca7e91e2db6547d2e943330 | [
"MIT"
] | 9 | 2018-03-11T12:53:12.000Z | 2020-12-19T14:21:53.000Z | visual_novel/visual_novel/settings/local.py | dolamroth/visual_novel | c67379df395561b3bca7e91e2db6547d2e943330 | [
"MIT"
] | 6 | 2020-02-11T22:19:22.000Z | 2022-03-11T23:20:10.000Z | visual_novel/visual_novel/settings/local.py | dolamroth/visual_novel | c67379df395561b3bca7e91e2db6547d2e943330 | [
"MIT"
] | null | null | null | from .base import *
DEBUG = True
STATIC_ROOT = os.path.join(BASE_DIR, '')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
ALLOWED_HOSTS += ['127.0.0.1']
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.db.DatabaseCache",
"LOCATION": "cache_table_for_local_development"
}
}
WSGI_APPLICATION = None
| 16.857143 | 65 | 0.649718 |
d6695af7d9358a0cad2ad2f4a628fd0deb2f7cc6 | 673 | py | Python | pyrival/algebra/discrete_log.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | [
"Apache-2.0"
] | 748 | 2018-09-27T01:08:12.000Z | 2022-03-25T17:31:56.000Z | pyrival/algebra/discrete_log.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | [
"Apache-2.0"
] | 38 | 2019-02-24T14:50:02.000Z | 2022-03-25T01:27:50.000Z | pyrival/algebra/discrete_log.py | MattJDavidson/aoc2021 | 1c26697da55e58408f36525639d201303f808b1b | [
"Apache-2.0"
] | 288 | 2018-10-29T11:55:57.000Z | 2022-03-20T04:37:27.000Z | def discrete_log(a, b, mod):
"""
Returns smallest x > 0 s.t. pow(a, x, mod) == b or None if no such x exists.
Note: works even if a and mod are not coprime.
"""
n = int(mod**0.5) + 1
# tiny_step[x] = maximum j <= n s.t. b * a^j % mod = x
tiny_step, e = {}, 1
for j in range(1, n + 1):
e = e * a % mod
if e == b:
return j
tiny_step[b * e % mod] = j
# find (i, j) s.t. a^(n * i) % mod = b * a^j % mod
factor = e
for i in range(2, n + 2):
e = e * factor % mod
if e in tiny_step:
j = tiny_step[e]
return n * i - j if pow(a, n * i - j, mod) == b else None
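# Example sketch (added for illustration): pow(2, 3, 11) == 8 and no smaller
# positive exponent maps 2 to 8 modulo 11, so the first call returns 3.
def _example_discrete_log():
    assert discrete_log(2, 8, 11) == 3
    assert discrete_log(3, 1, 7) == 6  # 3 is a primitive root mod 7, so its order is 6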
| 29.26087 | 80 | 0.454681 |
fd3b253adf92e534ce06a5619ca9a08e2fdfce41 | 1,234 | py | Python | coinnews/pipelines.py | jlparadox/coinnews | 09a99e67115eda36084219cbd77967e73b6017c7 | [
"Apache-2.0"
] | null | null | null | coinnews/pipelines.py | jlparadox/coinnews | 09a99e67115eda36084219cbd77967e73b6017c7 | [
"Apache-2.0"
] | null | null | null | coinnews/pipelines.py | jlparadox/coinnews | 09a99e67115eda36084219cbd77967e73b6017c7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
class CoinnewsPipeline(object):
collection_name = 'coin_articles'
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
)
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def close_spider(self, spider):
self.client.close()
def process_item(self, item, spider):
for data in item:
if not data:
raise DropItem("Missing data!")
self.db[self.collection_name].insert_one(dict(item))
log.msg("Question added to MongoDB database!",
level=log.DEBUG, spider=spider)
return item
| 28.697674 | 68 | 0.657212 |
219ca34c168633e3585fa899e4bfe6606dc0f89c | 566 | py | Python | ontask/migrations/0017_auto_20180523_1611.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 33 | 2017-12-02T04:09:24.000Z | 2021-11-07T08:41:57.000Z | ontask/migrations/0017_auto_20180523_1611.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 189 | 2017-11-16T04:06:29.000Z | 2022-03-11T23:35:59.000Z | ontask/migrations/0017_auto_20180523_1611.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | 30 | 2017-11-30T03:35:44.000Z | 2022-01-31T03:08:08.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-05-23 06:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ontask', '0016_auto_20180522_2146'),
]
operations = [
migrations.AlterModelOptions(
name='condition',
options={'ordering': ('created',)},
),
migrations.AlterUniqueTogether(
name='condition',
unique_together=set([('action', 'name', 'is_filter')]),
),
]
| 23.583333 | 67 | 0.591873 |
2188bbcebceafaf01e068c75c5ac436615cf0a98 | 16,695 | py | Python | mmhuman3d/core/visualization/renderer/matplotlib3d_renderer.py | yl-1993/mmhuman3d | 61a7427b7882d5e5f5fe623272a5c455c3d3b009 | [
"Apache-2.0"
] | 472 | 2021-12-03T03:12:55.000Z | 2022-03-31T01:33:13.000Z | mmhuman3d/core/visualization/renderer/matplotlib3d_renderer.py | yl-1993/mmhuman3d | 61a7427b7882d5e5f5fe623272a5c455c3d3b009 | [
"Apache-2.0"
] | 127 | 2021-12-03T05:00:14.000Z | 2022-03-31T13:47:33.000Z | mmhuman3d/core/visualization/renderer/matplotlib3d_renderer.py | yl-1993/mmhuman3d | 61a7427b7882d5e5f5fe623272a5c455c3d3b009 | [
"Apache-2.0"
] | 37 | 2021-12-03T03:23:22.000Z | 2022-03-31T08:41:58.000Z | import io
import os
import shutil
from pathlib import Path
from typing import Iterable, List, Optional, Union
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from mmhuman3d.core.conventions.cameras.convert_convention import \
enc_camera_convention # prevent yapf isort conflict
from mmhuman3d.utils import get_different_colors
from mmhuman3d.utils.ffmpeg_utils import images_to_video
from mmhuman3d.utils.path_utils import check_path_suffix
class Axes3dBaseRenderer(object):
"""Base renderer."""
def init_camera(self,
cam_elev_angle=10,
cam_elev_speed=0.0,
cam_hori_angle=45,
cam_hori_speed=0.5):
"""Initiate the route of camera with arguments.
Args:
cam_elev_angle (int, optional):
The pitch angle where camera starts.
Defaults to 10.
cam_elev_speed (float, optional):
                The pitch angle the camera advances per frame.
                It will go back and forth between -30 and 30 degrees.
Defaults to 0.0.
cam_hori_angle (int, optional):
The yaw angle where camera starts. Defaults to 45.
cam_hori_speed (float, optional):
                The yaw angle the camera advances per frame.
                It will go back and forth between 0 and 90 degrees.
Defaults to 0.5.
"""
self.cam_elevation_args = [cam_elev_angle, cam_elev_speed]
self.cam_horizon_args = [cam_hori_angle, cam_hori_speed]
self.if_camera_init = True
def _get_camera_vector_list(self, frame_number):
"""Generate self.cam_vector_list according to hori and elev arguments.
Args:
frame_number (int):
Number of frames.
Returns:
List[List[float, float]]:
A list of float vectors.
"""
self.cam_vector_list = [
[self.cam_elevation_args[0], self.cam_horizon_args[0]],
]
ele_sign = 1
hor_sign = 1
for _ in range(frame_number - 1):
new_ele_angle = ele_sign * self.cam_elevation_args[
1] + self.cam_vector_list[-1][0]
# if elevation angle out of range, go backwards
if new_ele_angle <= self.cam_elevation_args[
1] or new_ele_angle >= 30:
ele_sign = (-1) * ele_sign
new_ele_angle = (
ele_sign * self.cam_elevation_args[1] +
self.cam_vector_list[-1][0])
new_hor_angle = (
hor_sign * self.cam_horizon_args[1] +
self.cam_vector_list[-1][1])
# if horizon angle out of range, go backwards
if new_hor_angle >= 90 - 2 * self.cam_horizon_args[
1] or new_hor_angle <= 2 * self.cam_horizon_args[1]:
hor_sign = (-1) * hor_sign
new_hor_angle = (
hor_sign * self.cam_horizon_args[1] +
self.cam_vector_list[-1][1])
self.cam_vector_list.append([new_ele_angle, new_hor_angle])
return self.cam_vector_list
@staticmethod
def _get_visual_range(points: np.ndarray) -> np.ndarray:
"""Calculate the visual range according to the input points. It make
sure that no point is absent.
Args:
points (np.ndarray):
An array of 3D points.
Axis at the last dim.
Returns:
np.ndarray:
An array in shape [3, 2].
It marks the lower bound and the upper bound
along each axis.
"""
axis_num = points.shape[-1]
axis_stat = np.zeros(shape=[axis_num, 4])
for axis_index in range(axis_num):
axis_data = points[..., axis_index]
axis_min = np.min(axis_data)
axis_max = np.max(axis_data)
axis_mid = (axis_min + axis_max) / 2.0
axis_span = axis_max - axis_min
axis_stat[axis_index] = np.asarray(
(axis_min, axis_max, axis_mid, axis_span))
max_span = np.max(axis_stat[:, 3])
visual_range = np.zeros(shape=[axis_num, 2])
for axis_index in range(axis_num):
visual_range[axis_index, 0] =\
axis_stat[axis_index, 2] - max_span/2.0
visual_range[axis_index, 1] =\
axis_stat[axis_index, 2] + max_span/2.0
return visual_range
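    # Worked example (added for clarity): for points spanning x in [0, 4],
    # y in [0, 2] and z in [0, 1], the largest span (4) wins, so every axis
    # gets a window of width 4 centred on its own midpoint:
    #   x -> [0, 4], y -> [-1, 3], z -> [-1.5, 2.5]
    # which keeps the plotted volume cubic so no point falls outside it.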
def _draw_scene(self,
visual_range,
axis_len=1.0,
cam_elev_angle=10,
cam_hori_angle=45):
"""Draw an empty scene according to visual range and camera vector.
Args:
visual_range (np.ndarray):
Return value of _get_visual_range().
axis_len (float, optional):
The length of every axis.
Defaults to 1.0.
cam_elev_angle (int, optional):
Pitch angle of the camera.
Defaults to 10.
cam_hori_angle (int, optional):
Yaw angle of the camera.
Defaults to 45.
Returns:
list: Figure and Axes3D
"""
fig = plt.figure()
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
ax.set_xlim(*visual_range[0])
ax.set_ylim(*visual_range[1])
ax.set_zlim(*visual_range[2])
ax.view_init(cam_elev_angle, cam_hori_angle)
mid_point = [
np.average(visual_range[0]),
np.average(visual_range[1]),
np.average(visual_range[2]),
]
# draw axis
zero_point = np.array([0, 0, 0])
x_axis = np.array([(visual_range[0][1] - mid_point[0]) * axis_len, 0,
0])
y_axis = np.array(
[0, (visual_range[1][1] - mid_point[1]) * axis_len, 0])
z_axis = np.array(
[0, 0, (visual_range[2][1] - mid_point[2]) * axis_len])
ax = _plot_line_on_fig(ax, zero_point, x_axis, 'r')
ax = _plot_line_on_fig(ax, zero_point, y_axis, 'g')
ax = _plot_line_on_fig(ax, zero_point, z_axis, 'b')
return fig, ax
class Axes3dJointsRenderer(Axes3dBaseRenderer):
"""Render of joints."""
def __init__(self):
self.if_camera_init = False
self.cam_vector_list = None
self.if_connection_setup = False
self.if_frame_updated = False
self.temp_path = ''
def set_connections(self, limbs_connection, limbs_palette):
"""set body limbs."""
self.limbs_connection = limbs_connection
self.limbs_palette = limbs_palette
self.if_connection_setup = True
def render_kp3d_to_video(
self,
keypoints_np: np.ndarray,
output_path: Optional[str] = None,
convention='opencv',
fps: Union[float, int] = 30,
resolution: Iterable[int] = (720, 720),
visual_range: Iterable[int] = (-100, 100),
frame_names: Optional[List[str]] = None,
disable_limbs: bool = False,
return_array: bool = False,
) -> None:
"""Render 3d keypoints to a video.
Args:
keypoints_np (np.ndarray): shape of input array should be
(f * n * J * 3).
output_path (str): output video path or frame folder.
sign (Iterable[int], optional): direction of the axis.
Defaults to (1, 1, 1).
axis (str, optional): axis convention.
Defaults to 'xzy'.
fps (Union[float, int], optional): fps.
Defaults to 30.
resolution (Iterable[int], optional): (width, height) of
output video.
Defaults to (720, 720).
visual_range (Iterable[int], optional): range of axis value.
Defaults to (-100, 100).
frame_names (Optional[List[str]], optional): List of string
for frame title, no title if None. Defaults to None.
disable_limbs (bool, optional): whether need to disable drawing
limbs.
Defaults to False.
Returns:
None.
"""
assert self.if_camera_init is True
assert self.if_connection_setup is True
sign, axis = enc_camera_convention(convention)
if output_path is not None:
if check_path_suffix(output_path, ['.mp4', '.gif']):
self.temp_path = os.path.join(
Path(output_path).parent,
Path(output_path).name + '_output_temp')
mmcv.mkdir_or_exist(self.temp_path)
print('make dir', self.temp_path)
self.remove_temp = True
else:
self.temp_path = output_path
self.remove_temp = False
else:
self.temp_path = None
keypoints_np = _set_new_pose(keypoints_np, sign, axis)
if not self.if_frame_updated:
if self.cam_vector_list is None:
self._get_camera_vector_list(
frame_number=keypoints_np.shape[0])
assert len(self.cam_vector_list) == keypoints_np.shape[0]
if visual_range is None:
visual_range = self._get_visual_range(keypoints_np)
else:
visual_range = np.asarray(visual_range)
if len(visual_range.shape) == 1:
one_dim_visual_range = np.expand_dims(visual_range, 0)
visual_range = one_dim_visual_range.repeat(3, axis=0)
image_array = self._export_frames(keypoints_np, resolution,
visual_range, frame_names,
disable_limbs, return_array)
self.if_frame_updated = True
if output_path is not None:
if check_path_suffix(output_path, '.mp4'):
images_to_video(
self.temp_path,
output_path,
img_format='frame_%06d.png',
fps=fps)
return image_array
def _export_frames(self, keypoints_np, resolution, visual_range,
frame_names, disable_limbs, return_array):
"""Write output/temp images."""
image_array = []
for frame_index in range(keypoints_np.shape[0]):
keypoints_frame = keypoints_np[frame_index]
cam_ele, cam_hor = self.cam_vector_list[frame_index]
fig, ax = \
self._draw_scene(visual_range=visual_range, axis_len=0.5,
cam_elev_angle=cam_ele,
cam_hori_angle=cam_hor)
# draw limbs
num_person = keypoints_frame.shape[0]
for person_index, keypoints_person in enumerate(keypoints_frame):
if num_person >= 2:
self.limbs_palette = get_different_colors(
num_person)[person_index].reshape(-1, 3)
if not disable_limbs:
for part_name, limbs in self.limbs_connection.items():
if part_name == 'body':
linewidth = 2
else:
linewidth = 1
if isinstance(self.limbs_palette, np.ndarray):
color = self.limbs_palette.astype(
np.int32).reshape(-1, 3)
elif isinstance(self.limbs_palette, dict):
color = np.array(
self.limbs_palette[part_name]).astype(np.int32)
for limb_index, limb in enumerate(limbs):
limb_index = min(limb_index, len(color) - 1)
ax = _plot_line_on_fig(
ax,
keypoints_person[limb[0]],
keypoints_person[limb[1]],
color=np.array(color[limb_index]) / 255.0,
linewidth=linewidth)
scatter_points_index = list(
set(
np.array(self.limbs_connection['body']).reshape(
-1).tolist()))
ax.scatter(
keypoints_person[scatter_points_index, 0],
keypoints_person[scatter_points_index, 1],
keypoints_person[scatter_points_index, 2],
c=np.array([0, 0, 0]).reshape(1, -1),
s=10,
marker='o')
if num_person >= 2:
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
labels = []
custom_lines = []
for person_index in range(num_person):
color = get_different_colors(
num_person)[person_index].reshape(1, 3) / 255.0
custom_lines.append(
Line2D([0], [0],
linestyle='-',
color=color[0],
lw=2,
marker='',
markeredgecolor='k',
markeredgewidth=.1,
markersize=20))
labels.append(f'person_{person_index + 1}')
ax.legend(
handles=custom_lines,
labels=labels,
loc='upper left',
)
plt.close('all')
rgb_mat = _get_cv2mat_from_buf(fig)
resized_mat = cv2.resize(rgb_mat, resolution)
if frame_names is not None:
cv2.putText(
resized_mat, str(frame_names[frame_index]),
(resolution[0] // 10, resolution[1] // 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5 * resolution[0] / 500,
np.array([255, 255, 255]).astype(np.int32).tolist(), 2)
if self.temp_path is not None:
frame_path = os.path.join(self.temp_path,
'frame_%06d.png' % frame_index)
cv2.imwrite(frame_path, resized_mat)
if return_array:
image_array.append(resized_mat[None])
if return_array:
image_array = np.concatenate(image_array)
return image_array
else:
return None
def __del__(self):
"""remove temp images."""
self.remove_temp_frames()
def remove_temp_frames(self):
"""remove temp images."""
if self.temp_path is not None:
if Path(self.temp_path).is_dir() and self.remove_temp:
shutil.rmtree(self.temp_path)
def _set_new_pose(pose_np, sign, axis):
"""set new pose with axis convention."""
target_sign = [-1, 1, -1]
target_axis = ['x', 'z', 'y']
pose_rearrange_axis_result = pose_np.copy()
for axis_index, axis_name in enumerate(target_axis):
src_axis_index = axis.index(axis_name)
pose_rearrange_axis_result[..., axis_index] = \
pose_np[..., src_axis_index]
for dim_index in range(pose_rearrange_axis_result.shape[-1]):
pose_rearrange_axis_result[
..., dim_index] = sign[dim_index] / target_sign[
dim_index] * pose_rearrange_axis_result[..., dim_index]
return pose_rearrange_axis_result
def _plot_line_on_fig(ax,
point1_location,
point2_location,
color,
linewidth=1):
"""Draw line on fig with matplotlib."""
ax.plot([point1_location[0], point2_location[0]],
[point1_location[1], point2_location[1]],
[point1_location[2], point2_location[2]],
color=color,
linewidth=linewidth)
return ax
def _get_cv2mat_from_buf(fig, dpi=180):
"""Get numpy image from IO."""
buf = io.BytesIO()
fig.savefig(buf, format='png', dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
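# Minimal end-to-end sketch (added for illustration only; the joint count and
# limb indices below are made up and do not follow any real keypoint convention):
def _example_render_joints():
    kp3d = np.zeros((4, 1, 3, 3))  # 4 frames, 1 person, 3 joints, xyz
    renderer = Axes3dJointsRenderer()
    renderer.init_camera(cam_elev_angle=10, cam_hori_angle=45)
    renderer.set_connections({'body': [[0, 1], [1, 2]]}, {'body': [[0, 255, 0]]})
    return renderer.render_kp3d_to_video(
        kp3d, output_path=None, convention='opencv',
        resolution=(256, 256), visual_range=(-1, 1), return_array=True)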
| 40.132212 | 79 | 0.533753 |
651a2f915529dc97f2d817be8a67a64930f8fe77 | 646 | py | Python | ComRISB/pygtool/gw_gutz/data2h5.py | comscope/comsuite | d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c | [
"BSD-3-Clause"
] | 18 | 2019-06-15T18:08:21.000Z | 2022-01-30T05:01:29.000Z | ComRISB/pygtool/gw_gutz/data2h5.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | null | null | null | ComRISB/pygtool/gw_gutz/data2h5.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | 11 | 2019-06-05T02:57:55.000Z | 2021-12-29T02:54:25.000Z | import h5py
import numpy as np
evals = np.loadtxt('eigenvalues.dat')
evals = evals.T
num = 22
numk = evals.shape[1] // num  # integer division so that range(numk) below works
with h5py.File('data.h5', 'w') as f:
for ik, ibase in enumerate(range(0, evals.shape[1], num)):
f['/evals/ik_{}'.format(ik)] = evals[2,ibase:ibase+num]
for ik in range(numk):
orbs = np.loadtxt('projector_{}.dat'.format(ik+1))
orbs = orbs.T
orbs = orbs[3] + 1.j*orbs[4]
orbs = orbs.reshape((2, 22, 5))
orbs = np.swapaxes(orbs, 1, 2)
orbs = orbs.reshape((10,22))
orbs = orbs.T.conj() # <psi_k | loc orb>
f['/psi_orb/ik_{}'.format(ik)] = orbs
| 28.086957 | 63 | 0.568111 |
ee7329fa3f2f719ec8b8fbb8ec8405f56b34b1a9 | 12,951 | py | Python | bookstore/bookstore_web/routes.py | stillimproving/e-bookstore | 48de381b2d805a6bd8cc4c912234e193483825ae | [
"MIT"
] | null | null | null | bookstore/bookstore_web/routes.py | stillimproving/e-bookstore | 48de381b2d805a6bd8cc4c912234e193483825ae | [
"MIT"
] | null | null | null | bookstore/bookstore_web/routes.py | stillimproving/e-bookstore | 48de381b2d805a6bd8cc4c912234e193483825ae | [
"MIT"
] | null | null | null | from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.datastructures import MultiDict
from bookstore.models import BooksDB, BookSearchCategory, OrderDB
from bookstore.models import CustomersDB, Customer
from bookstore.models import Cart
from bookstore.bookstore_web import app
from bookstore.bookstore_web.forms import EditUserForm, ChangePasswordForm, DeleteUserForm
from bookstore.bookstore_web.forms import LoginForm, SignupForm, SearchForm
NAME = 'E-BOOKSTORE'
CURRENCY = 'PLN'
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
discount = 25
books = BooksDB.search(category=BookSearchCategory.DISCOUNT, search_text=discount, operator='>=')[:5]
search = SearchForm(request.form)
if request.method == 'POST':
return search_results(search)
return render_template('index.html', global_title=NAME, position='../', after_title=" | Home", currency=CURRENCY,
books=books, search=search, discount=discount)
@app.route('/results')
def search_results(search):
search_string = search.data['search_input']
search_type = search.data['type']
if search_string == '':
flash('Empty string is not allowed!')
return redirect(url_for('index'))
enum_map = {
'Title': BookSearchCategory.TITLE,
'Author': BookSearchCategory.AUTHOR,
'Category': BookSearchCategory.CATEGORY,
'Publisher': BookSearchCategory.PUBLISHER,
'ISBN': BookSearchCategory.ISBN}
results = BooksDB.search(category=enum_map[search_type], search_text=search_string)
if not results:
flash('No results found for ' + search_type + ': "' + search_string + '"!')
return redirect('/')
return render_template('results.html', global_title=NAME, position='../', after_title=" | Search results",
search=search, results=results)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
login_form = LoginForm(prefix='log')
if login_form.submit.data and login_form.validate_on_submit():
# flash('Login requested for user {}, remember_me={}'.format(form.usermail.data, form.remember_me.data))
customer = CustomersDB.get(login_form.usermail.data)
if not customer or not customer.check_pass(login_form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(customer, remember=login_form.remember_me.data)
return redirect(url_for('index'))
signup_form = SignupForm(prefix='sign')
if signup_form.submit.data and signup_form.validate_on_submit():
if CustomersDB.get(signup_form.email.data):
flash('Given e-mail already registered')
return redirect(url_for('login'))
new_user = Customer(
name=signup_form.name.data,
surname=signup_form.surname.data,
password=signup_form.password.data,
street=signup_form.street.data,
email=signup_form.email.data,
phone=signup_form.phone.data,
postal_code=signup_form.postal_code.data,
city=signup_form.city.data,
country=signup_form.country.data
)
success = CustomersDB.add(new_user)
if success:
            flash('You are registered, please log in!')
        else:
            flash('Something went wrong, please try again')
return redirect(url_for('login'))
return render_template('login.html', global_title=NAME, position='../', after_title=' | Log In',
login_form=login_form, signup_form=signup_form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user')
@login_required
def user():
return render_template('user.html', global_title=NAME, position='../', after_title=' | Profile')
@app.route('/edit_user', methods=['GET', 'POST'])
@login_required
def edit_user():
if request.method == 'GET':
edit_user_form = EditUserForm(
formdata=MultiDict({
'name': current_user.name,
'surname': current_user.surname,
'phone': current_user.phone,
'street': current_user.street,
'postal_code': current_user.postal_code,
'city': current_user.city,
'country': current_user.country
})
)
else:
edit_user_form = EditUserForm()
if edit_user_form.validate_on_submit():
updated_user = Customer(
customer_id=current_user.customer_id,
email=current_user.email,
password=current_user.password,
name=edit_user_form.name.data,
surname=edit_user_form.surname.data,
street=edit_user_form.street.data,
phone=edit_user_form.phone.data,
postal_code=edit_user_form.postal_code.data,
city=edit_user_form.city.data,
country=edit_user_form.country.data
)
success = CustomersDB.update(updated_user)
if success:
flash('Your data has been successfully edited!')
return redirect(url_for('user'))
else:
            flash('Something went wrong, please try again')
return redirect(url_for('edit_user'))
return render_template('edit_user.html', global_title=NAME, position='../', after_title=' | Edit profile',
edit_user_form=edit_user_form)
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
change_pass_form = ChangePasswordForm()
if change_pass_form.validate_on_submit():
if change_pass_form.old_password.data == current_user.password:
new_pass_user = Customer(
customer_id=current_user.customer_id,
email=current_user.email,
password=change_pass_form.new_password.data,
name=current_user.name,
surname=current_user.surname,
street=current_user.street,
phone=current_user.phone,
postal_code=current_user.postal_code,
city=current_user.city,
country=current_user.country
)
success = CustomersDB.update(new_pass_user)
if success:
flash('Your password has been successfully changed!')
return redirect(url_for('user'))
else:
                flash('Something went wrong, please try again')
return redirect(url_for('change_password'))
else:
flash('Invalid old password')
return redirect(url_for('change_password'))
return render_template('change_password.html', global_title=NAME, position='../', after_title=' | Change password',
change_pass_form=change_pass_form)
@app.route('/delete_user', methods=['GET', 'POST'])
@login_required
def delete_user():
delete_form = DeleteUserForm()
if delete_form.is_submitted():
del_user = current_user
success = CustomersDB.delete(del_user)
if success:
flash('Goodbye! :(')
return redirect(url_for('logout'))
else:
            flash('Something went wrong, please try again')
return redirect(url_for('delete_user'))
return render_template('delete_user.html', global_title=NAME, position='../', after_title=' | Delete account',
delete_form=delete_form)
@app.route('/item/<book_id>', methods=['GET', 'POST'])
def item(book_id):
book = BooksDB.get(key=book_id)
return render_template('item.html', global_title=NAME, position='../../', after_title=" | " + book.title,
currency=CURRENCY, book=book) # , add_to_cart_form=add_to_cart_form
@app.route('/add_to_cart/<book_id>', methods=['GET'])
@login_required
def add_to_cart(book_id):
user_cart = Cart.get_user_cart(current_user.customer_id)
count = 0
if user_cart:
count = user_cart.get(book_id, 0)
Cart.add_to_cart(current_user.customer_id, book_id, count + 1)
return "nothing"
@app.route('/cart')
@login_required
def cart():
ids_cart = Cart.get_user_cart(current_user.customer_id)
user_cart = dict()
invalid_cart_items = []
no_discount_total = total = 0
if ids_cart:
for book_id, quantity in ids_cart.items():
book = BooksDB.get(key=book_id)
if quantity <= book.quantity:
user_cart[book] = quantity
no_discount_total += book.price*quantity
total += (book.price - book.price*book.discount/100)*quantity
elif quantity > book.quantity != 0:
user_cart[book] = book.quantity
Cart.add_to_cart(current_user.customer_id, book_id, book.quantity)
no_discount_total += book.price*book.quantity
total += (book.price - book.price*book.discount/100)*book.quantity
else:
invalid_cart_items.append((book_id, book))
for invalid in invalid_cart_items:
Cart.remove_from_cart(current_user.customer_id, invalid[0])
user_cart.pop(invalid[1], None)
return render_template('cart.html', global_title=NAME, position='../', after_title=" | Cart", currency=CURRENCY,
user_cart=user_cart, no_discount_total=no_discount_total, total=total)
@app.route('/refresh_cart_item/<book_id>', methods=['GET', 'POST'])
@login_required
def refresh_cart_item(book_id):
quantity = int(request.args.get('quantity', '0'))
if not quantity:
        flash('Something went wrong: the quantity must be at least 1')
success = Cart.add_to_cart(current_user.customer_id, book_id, quantity)
if success:
flash('Item has been updated')
else:
        flash('Something went wrong, please try again')
return redirect(url_for('cart'))
@app.route('/remove_cart_item/<book_id>', methods=['GET', 'POST'])
@login_required
def remove_cart_item(book_id):
success = Cart.remove_from_cart(current_user.customer_id, book_id)
if success:
flash('Item has been removed from your cart')
else:
        flash('Something went wrong, please try again')
return redirect(url_for('cart'))
@app.route('/order')
@login_required
def order():
missing_fields = []
if not current_user.phone:
missing_fields.append('Phone')
if not current_user.street:
missing_fields.append('Street')
if not current_user.postal_code:
missing_fields.append('Postal code')
if not current_user.city:
missing_fields.append('City')
if not current_user.country:
missing_fields.append('Country')
ids_cart = Cart.get_user_cart(current_user.customer_id)
user_cart = []
if not ids_cart:
        flash('The system was restarted while the order page was being refreshed. Unfortunately, you have to fill your cart again')
for book_id, quantity in ids_cart.items():
book = BooksDB.get(key=book_id)
user_cart.append({
'name': book.author + ": " + book.title + " (" + str(book.release) + ")",
'price': book.price - book.price * book.discount / 100,
'quantity': quantity,
'cost': quantity * (book.price - book.price * book.discount / 100)
})
total = 0
for books_in_cart in user_cart:
total += books_in_cart['cost']
return render_template('order.html', global_title=NAME, position='../', after_title=" | Order", currency=CURRENCY,
missing_fields=missing_fields, user_cart=user_cart, total=total)
@app.route('/buy')
@login_required
def buy():
ids_cart = Cart.get_user_cart(current_user.customer_id)
user_cart = []
for book_id, quantity in ids_cart.items():
book = BooksDB.get(key=book_id)
user_cart.append({
'name': book.author + ": " + book.title + " (" + str(book.release) + ")",
'price': book.price - book.price * book.discount / 100,
'quantity': quantity,
'cost': quantity * (book.price - book.price * book.discount / 100)
})
total = 0
for books_in_cart in user_cart:
total += books_in_cart['cost']
order_id = OrderDB.submit_order(customer=current_user, total_price=total)
return render_template('buy.html', global_title=NAME, position='../', after_title=' | Payment', order_id=order_id)
@app.route('/terms_of_use')
def terms_of_use():
return render_template('terms_of_use.html', global_title=NAME, position='../', after_title=' | Terms of use')
@app.route('/privacy_policy')
def privacy_policy():
return render_template('privacy_policy.html', global_title=NAME, position='../', after_title=' | Privacy policy')
| 39.726994 | 119 | 0.644429 |
caaace50ce5ac2264cbd572cfb6171cdb44ca840 | 2,920 | py | Python | pydeepspeech/wav_transcriber.py | Soebb/pydeepspeech | 968ce5b17c8904902fe5e83cf412c434e793c3fc | [
"MIT"
] | null | null | null | pydeepspeech/wav_transcriber.py | Soebb/pydeepspeech | 968ce5b17c8904902fe5e83cf412c434e793c3fc | [
"MIT"
] | null | null | null | pydeepspeech/wav_transcriber.py | Soebb/pydeepspeech | 968ce5b17c8904902fe5e83cf412c434e793c3fc | [
"MIT"
] | null | null | null | # pylint: skip-file
import glob
import logging
from timeit import default_timer as timer
import webrtcvad # type: ignore
from deepspeech import Model # type: ignore
import pydeepspeech.wav_split as wav_split
"""
Load the pre-trained model into memory
@param models: Output Graph Protocol Buffer file
@param scorer: Scorer file
@Retval
Returns a list [DeepSpeech Object, Model Load Time, Scorer Load Time]
"""
def load_model(models, scorer):
model_load_start = timer()
ds = Model(models)
model_load_end = timer() - model_load_start
logging.debug("Loaded model in %0.3fs." % (model_load_end))
scorer_load_start = timer()
ds.enableExternalScorer(scorer)
scorer_load_end = timer() - scorer_load_start
logging.debug("Loaded external scorer in %0.3fs." % (scorer_load_end))
return [ds, model_load_end, scorer_load_end]
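# Usage sketch (the model/scorer paths below are hypothetical):
#   ds, model_load_time, scorer_load_time = load_model(
#       "models/output_graph.pbmm", "models/kenlm.scorer")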
"""
Run Inference on input audio file
@param ds: Deepspeech object
@param audio: Input audio for running inference on
@param fs: Sample rate of the input audio file
@Retval:
Returns a list [Inference, Inference Time, Audio Length]
"""
def stt(ds, audio, fs):
inference_time = 0.0
audio_length = len(audio) * (1 / fs)
# Run Deepspeech
logging.debug("Running inference...")
inference_start = timer()
output = ds.stt(audio)
inference_end = timer() - inference_start
inference_time += inference_end
logging.debug(
"Inference took %0.3fs for %0.3fs audio file."
% (inference_end, audio_length)
)
return [output, inference_time]
"""
Resolve directory path for the models and fetch each of them.
@param dirName: Path to the directory containing pre-trained models
@Retval:
Returns a tuple containing each of the model files (pb, scorer)
"""
def resolve_models(dirName):
pb = glob.glob(dirName + "/*.pbmm")[0]
logging.debug("Found Model: %s" % pb)
scorer = glob.glob(dirName + "/*.scorer")[0]
logging.debug("Found scorer: %s" % scorer)
return pb, scorer
"""
Generate VAD segments. Filters out non-voiced audio frames.
@param wavFile: Input wav file to run VAD on.
@Retval:
Returns tuple of
segments: a bytearray of multiple smaller audio frames
(The longer audio split into multiple smaller ones)
sample_rate: Sample rate of the input audio file
audio_length: Duration of the input audio file
"""
def vad_segment_generator(wavFile, aggressiveness):
logging.debug("Caught the wav file @: %s" % (wavFile))
audio, sample_rate, audio_length = wav_split.read_wave(wavFile)
assert (
sample_rate == 16000
), "Only 16000Hz input WAV files are supported for now!"
vad = webrtcvad.Vad(int(aggressiveness))
frames = wav_split.frame_generator(30, audio, sample_rate)
frames = list(frames)
segments = wav_split.vad_collector(sample_rate, 30, 300, vad, frames)
return segments, sample_rate, audio_length
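# End-to-end sketch (assumes a 16 kHz mono WAV and a directory of model files; numpy
# would need to be imported to turn each byte segment into an int16 array):
#   pb, scorer = resolve_models("models")
#   ds, _, _ = load_model(pb, scorer)
#   segments, sample_rate, _ = vad_segment_generator("audio.wav", aggressiveness=1)
#   for segment in segments:
#       audio = np.frombuffer(segment, dtype=np.int16)
#       transcript, _ = stt(ds, audio, sample_rate)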
| 26.306306 | 74 | 0.708904 |
63e907d23c6449656226369f36631c2d61a23326 | 179 | py | Python | irelia/__init__.py | bgraver/Irelia | d1785fac7a3dadfa3af523f0637f5a838f830408 | [
"MIT"
] | null | null | null | irelia/__init__.py | bgraver/Irelia | d1785fac7a3dadfa3af523f0637f5a838f830408 | [
"MIT"
] | null | null | null | irelia/__init__.py | bgraver/Irelia | d1785fac7a3dadfa3af523f0637f5a838f830408 | [
"MIT"
] | null | null | null | from irelia.esports_lib import Lolesports
headers = {'x-api-key': '0TvQnueqKa5mxJntVWt0w4LpLfEkrV1Ta8rQBb9Z'}
params = {"hl": "en-US"}
s11_start_date = "2021-01-01"
| 17.9 | 68 | 0.698324 |
693a6294de173541f5b91fd437fd0ae3c6046fa8 | 2,104 | py | Python | Python/phonenumbers/data/region_JM.py | skykisl/uberbruns2 | 26933efce04dba700d93cc75c7b74e069fb02d26 | [
"Unlicense"
] | 5 | 2015-04-27T20:10:56.000Z | 2018-06-14T18:19:09.000Z | python/phonenumbers/data/region_JM.py | vemel/python-phonenumbers | 595c322bf12106a3b95e3f202e948a7c6b6c15b8 | [
"Apache-2.0"
] | 2 | 2017-06-08T16:11:13.000Z | 2018-05-07T11:50:13.000Z | python/phonenumbers/data/region_JM.py | vemel/python-phonenumbers | 595c322bf12106a3b95e3f202e948a7c6b6c15b8 | [
"Apache-2.0"
] | 6 | 2015-02-19T11:11:04.000Z | 2022-03-15T19:38:31.000Z | """Auto-generated file, do not edit by hand. JM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_JM = PhoneMetadata(id='JM', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='[589]\\d{9}', possible_number_pattern='\\d{7}(?:\\d{3})?'),
fixed_line=PhoneNumberDesc(national_number_pattern='876(?:5(?:0[12]|1[0-468]|2[35]|63)|6(?:0[1-3579]|1[027-9]|[23]\\d|40|5[06]|6[2-489]|7[05]|8[04]|9[4-9])|7(?:0[2-689]|[1-6]\\d|8[056]|9[45])|9(?:0[1-8]|1[02378]|[2-8]\\d|9[2-468]))\\d{4}', possible_number_pattern='\\d{7}(?:\\d{3})?', example_number='8765123456'),
mobile=PhoneNumberDesc(national_number_pattern='876(?:2[1789]\\d|[348]\\d{2}|5(?:08|27|6[0-24-9]|[3-578]\\d)|7(?:0[07]|7\\d|8[1-47-9]|9[0-36-9])|9(?:[01]9|9[0579]))\\d{4}', possible_number_pattern='\\d{10}', example_number='8762101234'),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|55|66|77|88)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='8002123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='9002123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|33|44)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='5002345678'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='11[09]', possible_number_pattern='\\d{3}', example_number='119'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='1',
national_prefix_for_parsing='1',
leading_digits='876')
| 100.190476 | 318 | 0.72576 |
3aa467a38e2b50e3c1a5d1fccaf89dba7d212b61 | 7,906 | py | Python | tests/test_sampling.py | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d | [
"MIT"
] | 16 | 2021-02-18T00:04:54.000Z | 2021-09-01T03:25:45.000Z | tests/test_sampling.py | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d | [
"MIT"
] | 59 | 2021-03-09T11:05:37.000Z | 2022-03-30T14:21:14.000Z | tests/test_sampling.py | Rodrigo-Tenorio/nessai | 2b4175da61b3a7250d1154a126ad93481836df0d | [
"MIT"
] | 1 | 2022-03-25T12:28:16.000Z | 2022-03-25T12:28:16.000Z | # -*- coding: utf-8 -*-
"""
Integration tests for running the sampler with different configurations.
"""
import os
import torch
import pytest
import numpy as np
from nessai.flowsampler import FlowSampler
from nessai.livepoint import numpy_array_to_live_points
from nessai.model import Model
torch.set_num_threads(1)
@pytest.mark.slow_integration_test
def test_sampling_with_rescale(model, flow_config, tmpdir):
"""
Test sampling with rescaling. Checks that flow is trained.
"""
output = str(tmpdir.mkdir('w_rescale'))
fp = FlowSampler(model, output=output, resume=False, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10, max_threads=1)
fp.run()
assert fp.ns.proposal.flow.weights_file is not None
assert fp.ns.proposal.training_count == 1
@pytest.mark.slow_integration_test
def test_sampling_with_inversion(model, flow_config, tmpdir):
"""
Test sampling with inversion. Checks that flow is trained.
"""
output = str(tmpdir.mkdir('w_rescale'))
fp = FlowSampler(model, output=output, resume=False, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10, max_threads=1,
boundary_inversion=True, update_bounds=True)
fp.run()
assert fp.ns.proposal.boundary_inversion == ['x', 'y']
assert fp.ns.proposal.flow.weights_file is not None
assert fp.ns.proposal.training_count == 1
@pytest.mark.slow_integration_test
def test_sampling_without_rescale(model, flow_config, tmpdir):
"""
Test sampling without rescaling. Checks that flow is trained.
"""
output = str(tmpdir.mkdir('wo_rescale'))
fp = FlowSampler(model, output=output, resume=False, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=False, seed=1234,
max_iteration=11, poolsize=10)
fp.run()
assert fp.ns.proposal.flow.weights_file is not None
assert fp.ns.proposal.training_count == 1
@pytest.mark.slow_integration_test
def test_sampling_with_maf(model, flow_config, tmpdir):
"""
Test sampling with MAF. Checks that flow is trained but does not
check convergence.
"""
flow_config['model_config']['ftype'] = 'maf'
output = str(tmpdir.mkdir('maf'))
fp = FlowSampler(model, output=output, resume=False, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10)
fp.run()
assert fp.ns.proposal.flow.weights_file is not None
assert fp.ns.proposal.training_count == 1
@pytest.mark.slow_integration_test
@pytest.mark.parametrize('analytic', [False, True])
def test_sampling_uninformed(model, flow_config, tmpdir, analytic):
"""
Test running the sampler with the two uninformed proposal methods.
"""
output = str(tmpdir.mkdir('uninformed'))
fp = FlowSampler(model, output=output, resume=False, nlive=100, plot=False,
flow_config=flow_config, training_frequency=None,
maximum_uninformed=10, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10,
analytic_proposal=analytic)
fp.run()
@pytest.mark.slow_integration_test
def test_sampling_with_n_pool(model, flow_config, tmpdir):
"""
Test running the sampler with multiprocessing.
"""
output = str(tmpdir.mkdir('pool'))
fp = FlowSampler(model, output=output, resume=False, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10, max_threads=3,
n_pool=2)
fp.run()
assert fp.ns.proposal.flow.weights_file is not None
assert fp.ns.proposal.training_count == 1
assert os.path.exists(output + '/result.json')
@pytest.mark.slow_integration_test
def test_sampling_resume(model, flow_config, tmpdir):
"""
Test resuming the sampler.
"""
output = str(tmpdir.mkdir('resume'))
fp = FlowSampler(model, output=output, resume=True, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10)
fp.run()
assert os.path.exists(os.path.join(output, 'nested_sampler_resume.pkl'))
fp = FlowSampler(model, output=output, resume=True,
flow_config=flow_config)
assert fp.ns.iteration == 11
fp.ns.max_iteration = 21
fp.run()
assert fp.ns.iteration == 21
assert os.path.exists(
os.path.join(output, 'nested_sampler_resume.pkl.old'))
@pytest.mark.slow_integration_test
def test_sampling_resume_no_max_uninformed(model, flow_config, tmpdir):
"""
Test resuming the sampler when there is no maximum iteration for
    the uninformed sampling.
This test makes sure the correct proposal is loaded after resuming
and re-initialising the sampler.
"""
output = str(tmpdir.mkdir('resume'))
fp = FlowSampler(model, output=output, resume=True, nlive=100, plot=False,
flow_config=flow_config, training_frequency=10,
maximum_uninformed=9, rescale_parameters=True,
seed=1234, max_iteration=11, poolsize=10)
fp.run()
assert os.path.exists(os.path.join(output, 'nested_sampler_resume.pkl'))
fp = FlowSampler(model, output=output, resume=True,
flow_config=flow_config)
assert fp.ns.iteration == 11
fp.ns.maximum_uninformed = np.inf
fp.ns.initialise()
assert fp.ns.proposal is fp.ns._flow_proposal
fp.ns.max_iteration = 21
fp.run()
assert fp.ns.iteration == 21
assert os.path.exists(
os.path.join(output, 'nested_sampler_resume.pkl.old'))
@pytest.mark.slow_integration_test
def test_sampling_with_infinite_prior_bounds(tmpdir):
"""
Make sure the sampler runs when sampling a parameter with infinite prior \
bounds.
"""
from scipy.stats import norm
output = str(tmpdir.mkdir('infinite_bounds'))
class TestModel(Model):
names = ['x', 'y']
bounds = {'x': [0, 1], 'y': [-np.inf, np.inf]}
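        # 'y' is unbounded, so it is given no rescaling reparameterisation;
        # 'x' keeps the default reparameterisation.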
reparameterisations = {'x': 'default', 'y': None}
def new_point(self, N=1):
x = np.concatenate([
np.random.rand(N, 1),
np.random.randn(N, 1)
], axis=1)
return numpy_array_to_live_points(x, self.names)
def log_prior(self, x):
log_p = np.log(self.in_bounds(x))
log_p += norm.logpdf(x['y'])
return log_p
def log_likelihood(self, x):
log_l = np.zeros(x.size)
for n in self.names:
log_l += norm.logpdf(x[n])
return log_l
fs = FlowSampler(
TestModel(),
output=output,
nlive=500,
plot=False,
proposal_plots=False
)
fs.run(plot=False)
assert fs.ns.condition <= 0.1
@pytest.mark.slow_integration_test
def test_constant_volume_mode(model, tmpdir):
"""Test sampling in constant volume mode"""
output = str(tmpdir.mkdir('test'))
fs = FlowSampler(
model,
output=output,
nlive=500,
plot=False,
proposal_plots=False,
constant_volume_mode=True
)
fs.run(plot=False)
| 35.294643 | 79 | 0.650519 |
396358b87f03f8cf892084e2ec39f506635d7fdf | 46 | py | Python | Lib/pylib/__init__.py | cjroehrig/crispyControllers | 6a56f7a61d66068824f4fd4b3c46d5b95585439d | [
"BSD-2-Clause"
] | null | null | null | Lib/pylib/__init__.py | cjroehrig/crispyControllers | 6a56f7a61d66068824f4fd4b3c46d5b95585439d | [
"BSD-2-Clause"
] | null | null | null | Lib/pylib/__init__.py | cjroehrig/crispyControllers | 6a56f7a61d66068824f4fd4b3c46d5b95585439d | [
"BSD-2-Clause"
] | null | null | null | # This is required to be considered a module.
| 23 | 45 | 0.76087 |
7511f977d68d8db864d7b5e67e9faaaf3a606c1f | 2,128 | py | Python | tests/integration/player_test.py | egret85/echovr-api | e135f25fb5b188e2931133d04c47c5e66e83a6c5 | [
"MIT"
] | 7 | 2018-11-02T18:12:18.000Z | 2021-03-08T10:47:59.000Z | tests/integration/player_test.py | egret85/echovr-api | e135f25fb5b188e2931133d04c47c5e66e83a6c5 | [
"MIT"
] | null | null | null | tests/integration/player_test.py | egret85/echovr-api | e135f25fb5b188e2931133d04c47c5e66e83a6c5 | [
"MIT"
] | 4 | 2018-11-02T18:12:08.000Z | 2020-06-19T19:42:39.000Z | import pytest
@pytest.fixture
def player(standard_public_match_gamestate):
return standard_public_match_gamestate.teams[0].players[0]
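# The standard_public_match_gamestate fixture is assumed to be provided by a conftest.py
# that loads a captured Echo VR session payload (not shown here).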
def test_name(player):
assert player.name == "Bob"
def test_playerid(player):
assert player.playerid == 0
def test_userid(player):
assert player.userid == 4814054792376258
def test_level(player):
assert player.level == 8
def test_number(player):
assert player.number == 76
def test_possession(player):
assert player.possession == False
def test_stunned(player):
assert player.stunned == False
def test_blocking(player):
assert player.blocking == False
def test_invulnerable(player):
assert player.invulnerable == False
def test_position(player):
assert player.position != None
assert player.position.x == -10.598001
assert player.position.y == 3.9720001
assert player.position.z == 26.736002
def test_velocity(player):
assert player.velocity != None
assert player.velocity.x == -2.131
assert player.velocity.y == -0.63000005
assert player.velocity.z == 0.33400002
def test_lhand(player):
assert player.lhand != None
assert player.lhand.x == -10.419001
assert player.lhand.y == 3.5290003
assert player.lhand.z == 26.732
def test_rhand(player):
assert player.rhand != None
assert player.rhand.x == -10.416
assert player.rhand.y == 3.5430002
assert player.rhand.z == 26.869001
def test_forward(player):
assert player.forward != None
assert player.forward.x == 0.57100004
assert player.forward.y == -0.26800001
assert player.forward.z == -0.77600002
def test_left(player):
assert player.left != None
assert player.left.x == -0.80800003
assert player.left.y == -0.017000001
assert player.left.z == -0.58900005
def test_up(player):
assert player.up != None
assert player.up.x == 0.14500001
assert player.up.y == 0.96300006
assert player.up.z == -0.22600001
def test_stats(player):
assert player.stats != None
assert player.stats.possession_time == 12.294746
def test_username(player):
assert player.username == "Bob"
| 25.95122 | 62 | 0.703947 |
8442560c7a88c45d382a3a96bac67b0654db0e4c | 574 | py | Python | balanced-binary-tree/balanced-binary-tree.py | rams1996/Trees | b7d6a92ed76a9e4d01bfe7f85debb7ca2b350ac9 | [
"MIT"
] | null | null | null | balanced-binary-tree/balanced-binary-tree.py | rams1996/Trees | b7d6a92ed76a9e4d01bfe7f85debb7ca2b350ac9 | [
"MIT"
] | null | null | null | balanced-binary-tree/balanced-binary-tree.py | rams1996/Trees | b7d6a92ed76a9e4d01bfe7f85debb7ca2b350ac9 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
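        # Bottom-up DFS: each call returns the height of its subtree, or -1 as a
        # sentinel as soon as any subtree is unbalanced, so every node is visited
        # once and the whole check runs in O(n) time.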
def dfs(root):
if not root:
return 0
            l = dfs(root.left)
            r = dfs(root.right)
            if abs(l - r) > 1 or l == -1 or r == -1:
                return -1
            return max(l, r) + 1
        if dfs(root) == -1:
return False
else:
return True
| 23.916667 | 49 | 0.447735 |
3a1e9108a8cd4899977745d0dda89fcc78f83181 | 2,923 | py | Python | e2e_tests/tests/fixtures/pytorch_lightning_amp/mnist.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | 1,729 | 2020-04-27T17:36:40.000Z | 2022-03-31T05:48:39.000Z | e2e_tests/tests/fixtures/pytorch_lightning_amp/mnist.py | ChrisW09/determined | 5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0 | [
"Apache-2.0"
] | 1,940 | 2020-04-27T17:34:14.000Z | 2022-03-31T23:02:28.000Z | e2e_tests/tests/fixtures/pytorch_lightning_amp/mnist.py | ChrisW09/determined | 5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0 | [
"Apache-2.0"
] | 214 | 2020-04-27T19:57:28.000Z | 2022-03-29T08:17:16.000Z | # The class LitMNIST is modified from the Pytorch Lightning example:
# https://colab.research.google.com/github/PytorchLightning/pytorch-lightning/
# blob/master/notebooks/01-mnist-hello-world.ipynb#scrollTo=4DNItffri95Q
#
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy
import data
class LitMNIST(pl.LightningModule):
def __init__(self, hidden_size=64, learning_rate=2e-4):
super().__init__()
# Set our init args as class attributes
self.hidden_size = hidden_size
self.learning_rate = learning_rate
# Hardcode some dataset specific attributes
self.num_classes = 10
self.dims = (1, 28, 28)
channels, width, height = self.dims
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# Define PyTorch model
self.model = nn.Sequential(
nn.Flatten(),
nn.Linear(channels * width * height, hidden_size),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(hidden_size, self.num_classes)
)
def forward(self, x):
x = self.model(x)
return F.log_softmax(x, dim=1)
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
self.log('train_loss', loss)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
return {'val_loss': loss, 'accuracy': acc}
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
return optimizer
if __name__ == '__main__':
model = LitMNIST()
trainer = pl.Trainer(max_epochs=3, default_root_dir='/tmp/lightning')
dm = data.MNISTDataModule('https://s3-us-west-2.amazonaws.com/determined-ai-test-data/pytorch_mnist.tar.gz')
trainer.fit(model, datamodule=dm)
| 33.215909 | 112 | 0.661991 |
51afa0cb1a58f33ba5147af912a3f3d17ed2f071 | 3,607 | py | Python | test/functional/wallet_keypool.py | dyennet/bitcoin | be992701b018f256db6d64786624be4cb60d8975 | [
"MIT"
] | 4,424 | 2015-10-19T19:04:02.000Z | 2022-03-21T12:11:29.000Z | test/functional/wallet_keypool.py | dyennet/bitcoin | be992701b018f256db6d64786624be4cb60d8975 | [
"MIT"
] | 316 | 2015-12-05T21:55:16.000Z | 2022-03-05T12:28:43.000Z | test/functional/wallet_keypool.py | dyennet/bitcoin | be992701b018f256db6d64786624be4cb60d8975 | [
"MIT"
] | 208 | 2016-06-17T23:47:57.000Z | 2022-03-16T09:11:26.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class KeyPoolTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[0].getaddressinfo(addr_before_encrypting)
wallet_info_old = nodes[0].getwalletinfo()
assert_equal(wallet_info_old['hdseedid'], wallet_info_old['hdmasterkeyid'])
assert(addr_before_encrypting_data['hdseedid'] == wallet_info_old['hdseedid'])
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].getaddressinfo(addr)
wallet_info = nodes[0].getwalletinfo()
assert_equal(wallet_info['hdseedid'], wallet_info['hdmasterkeyid'])
assert(addr_before_encrypting_data['hdseedid'] != wallet_info['hdseedid'])
assert(addr_data['hdseedid'] == wallet_info['hdseedid'])
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
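        # Encrypting the wallet flushed the pre-encryption keypool, so no new address
        # can be handed out until the wallet is unlocked and the keypool refilled below.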
# put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 6)
assert_equal(wi['keypoolsize'], 6)
# drain the internal keys
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
# drain the external keys
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
assert(len(addr) == 6)
# the next one should fail
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain them by mining
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
if __name__ == '__main__':
KeyPoolTest().main()
| 40.077778 | 119 | 0.669809 |
f0c6d060b50b62b29ffbfef3dc2d7bad7f920914 | 15,251 | py | Python | sdk/python/pulumi_azure_nextgen/insights/latest/component.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/insights/latest/component.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/insights/latest/component.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = ['Component']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:insights:Component'.""", DeprecationWarning)
class Component(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:insights:Component'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_type: Optional[pulumi.Input[Union[str, 'ApplicationType']]] = None,
disable_ip_masking: Optional[pulumi.Input[bool]] = None,
flow_type: Optional[pulumi.Input[Union[str, 'FlowType']]] = None,
hockey_app_id: Optional[pulumi.Input[str]] = None,
immediate_purge_data_on30_days: Optional[pulumi.Input[bool]] = None,
ingestion_mode: Optional[pulumi.Input[Union[str, 'IngestionMode']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
request_source: Optional[pulumi.Input[Union[str, 'RequestSource']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
retention_in_days: Optional[pulumi.Input[int]] = None,
sampling_percentage: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An Application Insights component definition.
Latest API Version: 2015-05-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'ApplicationType']] application_type: Type of application being monitored.
:param pulumi.Input[bool] disable_ip_masking: Disable IP masking.
:param pulumi.Input[Union[str, 'FlowType']] flow_type: Used by the Application Insights system to determine what kind of flow this component was created by. This is to be set to 'Bluefield' when creating/updating a component via the REST API.
:param pulumi.Input[str] hockey_app_id: The unique application ID created when a new application is added to HockeyApp, used for communications with HockeyApp.
:param pulumi.Input[bool] immediate_purge_data_on30_days: Purge data immediately after 30 days.
:param pulumi.Input[Union[str, 'IngestionMode']] ingestion_mode: Indicates the flow of the ingestion.
:param pulumi.Input[str] kind: The kind of application that this component refers to, used to customize UI. This value is a freeform string, values should typically be one of the following: web, ios, other, store, java, phone.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[Union[str, 'RequestSource']] request_source: Describes what tool created this Application Insights component. Customers using this API should set this to the default 'rest'.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name_: The name of the Application Insights component resource.
:param pulumi.Input[int] retention_in_days: Retention period in days.
:param pulumi.Input[float] sampling_percentage: Percentage of the data produced by the application being monitored that is being sampled for Application Insights telemetry.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.log.warn("Component is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:insights:Component'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if application_type is None:
application_type = 'web'
if application_type is None and not opts.urn:
raise TypeError("Missing required property 'application_type'")
__props__['application_type'] = application_type
__props__['disable_ip_masking'] = disable_ip_masking
if flow_type is None:
flow_type = 'Bluefield'
__props__['flow_type'] = flow_type
__props__['hockey_app_id'] = hockey_app_id
__props__['immediate_purge_data_on30_days'] = immediate_purge_data_on30_days
if ingestion_mode is None:
ingestion_mode = 'ApplicationInsights'
__props__['ingestion_mode'] = ingestion_mode
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__['kind'] = kind
__props__['location'] = location
if request_source is None:
request_source = 'rest'
__props__['request_source'] = request_source
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['resource_name'] = resource_name_
if retention_in_days is None:
retention_in_days = 90
__props__['retention_in_days'] = retention_in_days
__props__['sampling_percentage'] = sampling_percentage
__props__['tags'] = tags
__props__['app_id'] = None
__props__['application_id'] = None
__props__['connection_string'] = None
__props__['creation_date'] = None
__props__['hockey_app_token'] = None
__props__['instrumentation_key'] = None
__props__['name'] = None
__props__['private_link_scoped_resources'] = None
__props__['provisioning_state'] = None
__props__['tenant_id'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights:Component"), pulumi.Alias(type_="azure-nextgen:insights/v20150501:Component"), pulumi.Alias(type_="azure-nextgen:insights/v20180501preview:Component"), pulumi.Alias(type_="azure-nextgen:insights/v20200202preview:Component")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Component, __self__).__init__(
'azure-nextgen:insights/latest:Component',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Component':
"""
Get an existing Component resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Component(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Output[str]:
"""
Application Insights Unique ID for your Application.
"""
return pulumi.get(self, "app_id")
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> pulumi.Output[str]:
"""
The unique ID of your application. This field mirrors the 'Name' field and cannot be changed.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter(name="applicationType")
def application_type(self) -> pulumi.Output[str]:
"""
Type of application being monitored.
"""
return pulumi.get(self, "application_type")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> pulumi.Output[str]:
"""
Application Insights component connection string.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
Creation Date for the Application Insights component, in ISO 8601 format.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter(name="disableIpMasking")
def disable_ip_masking(self) -> pulumi.Output[Optional[bool]]:
"""
Disable IP masking.
"""
return pulumi.get(self, "disable_ip_masking")
@property
@pulumi.getter(name="flowType")
def flow_type(self) -> pulumi.Output[Optional[str]]:
"""
Used by the Application Insights system to determine what kind of flow this component was created by. This is to be set to 'Bluefield' when creating/updating a component via the REST API.
"""
return pulumi.get(self, "flow_type")
@property
@pulumi.getter(name="hockeyAppId")
def hockey_app_id(self) -> pulumi.Output[Optional[str]]:
"""
The unique application ID created when a new application is added to HockeyApp, used for communications with HockeyApp.
"""
return pulumi.get(self, "hockey_app_id")
@property
@pulumi.getter(name="hockeyAppToken")
def hockey_app_token(self) -> pulumi.Output[str]:
"""
Token used to authenticate communications with between Application Insights and HockeyApp.
"""
return pulumi.get(self, "hockey_app_token")
@property
@pulumi.getter(name="immediatePurgeDataOn30Days")
def immediate_purge_data_on30_days(self) -> pulumi.Output[Optional[bool]]:
"""
Purge data immediately after 30 days.
"""
return pulumi.get(self, "immediate_purge_data_on30_days")
@property
@pulumi.getter(name="ingestionMode")
def ingestion_mode(self) -> pulumi.Output[Optional[str]]:
"""
Indicates the flow of the ingestion.
"""
return pulumi.get(self, "ingestion_mode")
@property
@pulumi.getter(name="instrumentationKey")
def instrumentation_key(self) -> pulumi.Output[str]:
"""
Application Insights Instrumentation key. A read-only value that applications can use to identify the destination for all telemetry sent to Azure Application Insights. This value will be supplied upon construction of each new Application Insights component.
"""
return pulumi.get(self, "instrumentation_key")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The kind of application that this component refers to, used to customize UI. This value is a freeform string, values should typically be one of the following: web, ios, other, store, java, phone.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateLinkScopedResources")
def private_link_scoped_resources(self) -> pulumi.Output[Sequence['outputs.PrivateLinkScopedResourceResponse']]:
"""
List of linked private link scope resources.
"""
return pulumi.get(self, "private_link_scoped_resources")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Current state of this component: whether or not is has been provisioned within the resource group it is defined. Users cannot change this value but are able to read from it. Values will include Succeeded, Deploying, Canceled, and Failed.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="requestSource")
def request_source(self) -> pulumi.Output[Optional[str]]:
"""
Describes what tool created this Application Insights component. Customers using this API should set this to the default 'rest'.
"""
return pulumi.get(self, "request_source")
@property
@pulumi.getter(name="retentionInDays")
def retention_in_days(self) -> pulumi.Output[Optional[int]]:
"""
Retention period in days.
"""
return pulumi.get(self, "retention_in_days")
@property
@pulumi.getter(name="samplingPercentage")
def sampling_percentage(self) -> pulumi.Output[Optional[float]]:
"""
Percentage of the data produced by the application being monitored that is being sampled for Application Insights telemetry.
"""
return pulumi.get(self, "sampling_percentage")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
Azure Tenant Id.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
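# Usage sketch (not part of the generated code; the names below are illustrative):
#   import pulumi_azure_nextgen.insights.latest as insights
#   component = insights.Component(
#       "appInsights",
#       resource_name_="my-app-insights",
#       resource_group_name="my-resource-group",
#       application_type="web",
#       kind="web",
#       location="eastus")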
| 45.255193 | 321 | 0.657531 |
4eca88737c0fc9eaa6620fd1195118ca54d793cf | 17,625 | py | Python | fsspec/implementations/http.py | ysgit/filesystem_spec | f820f46b29f20d7c649509a4995f9cb8b484bbc4 | [
"BSD-3-Clause"
] | null | null | null | fsspec/implementations/http.py | ysgit/filesystem_spec | f820f46b29f20d7c649509a4995f9cb8b484bbc4 | [
"BSD-3-Clause"
] | null | null | null | fsspec/implementations/http.py | ysgit/filesystem_spec | f820f46b29f20d7c649509a4995f9cb8b484bbc4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division, absolute_import
import aiohttp
import asyncio
import logging
import re
import requests
import weakref
from urllib.parse import urlparse
from fsspec.spec import AbstractBufferedFile
from fsspec.utils import tokenize, DEFAULT_BLOCK_SIZE
from fsspec.asyn import sync_wrapper, sync, AsyncFileSystem
from ..caching import AllBytes
# https://stackoverflow.com/a/15926317/3821154
ex = re.compile(r"""<a\s+(?:[^>]*?\s+)?href=(["'])(.*?)\1""")
ex2 = re.compile(r"""(http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
logger = logging.getLogger("fsspec.http")
async def get_client(**kwargs):
return aiohttp.ClientSession(**kwargs)
class HTTPFileSystem(AsyncFileSystem):
"""
Simple File-System for fetching data via HTTP(S)
``ls()`` is implemented by loading the parent page and doing a regex
    match on the result. If simple_link=True, anything that looks like a URL
    (e.g. "http(s)://server.com/stuff?thing=other") is treated as a link;
    otherwise only links within HTML href tags will be used.
"""
sep = "/"
def __init__(
self,
simple_links=True,
block_size=None,
same_scheme=True,
size_policy=None,
cache_type="bytes",
cache_options=None,
asynchronous=False,
loop=None,
client_kwargs=None,
**storage_options
):
"""
        NB: if this is called async, you must await set_session
Parameters
----------
block_size: int
Blocks to read bytes; if 0, will default to raw requests file-like
objects instead of HTTPFile instances
simple_links: bool
If True, will consider both HTML <a> tags and anything that looks
like a URL; if False, will consider only the former.
same_scheme: True
When doing ls/glob, if this is True, only consider paths that have
http/https matching the input URLs.
size_policy: this argument is deprecated
client_kwargs: dict
Passed to aiohttp.ClientSession, see
https://docs.aiohttp.org/en/stable/client_reference.html
For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
storage_options: key-value
Any other parameters passed on to requests
cache_type, cache_options: defaults used in open
"""
super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options)
self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
self.simple_links = simple_links
self.same_schema = same_scheme
self.cache_type = cache_type
self.cache_options = cache_options
self.client_kwargs = client_kwargs or {}
self.kwargs = storage_options
if not asynchronous:
self._session = sync(self.loop, get_client, **self.client_kwargs)
weakref.finalize(self, sync, self.loop, self.session.close)
else:
self._session = None
@property
def session(self):
if self._session is None:
raise RuntimeError("please await ``.set_session`` before anything else")
return self._session
async def set_session(self):
self._session = await get_client(**self.client_kwargs)
@classmethod
def _strip_protocol(cls, path):
"""For HTTP, we always want to keep the full URL"""
return path
@classmethod
def _parent(cls, path):
# override, since _strip_protocol is different for URLs
par = super()._parent(path)
if len(par) > 7: # "http://..."
return par
return ""
async def _ls(self, url, detail=True, **kwargs):
# ignoring URL-encoded arguments
kw = self.kwargs.copy()
kw.update(kwargs)
logger.debug(url)
async with self.session.get(url, **self.kwargs) as r:
r.raise_for_status()
text = await r.text()
if self.simple_links:
links = ex2.findall(text) + ex.findall(text)
else:
links = ex.findall(text)
out = set()
parts = urlparse(url)
for l in links:
if isinstance(l, tuple):
l = l[1]
if l.startswith("/") and len(l) > 1:
# absolute URL on this server
l = parts.scheme + "://" + parts.netloc + l
if l.startswith("http"):
if self.same_schema and l.startswith(url.rstrip("/") + "/"):
out.add(l)
elif l.replace("https", "http").startswith(
url.replace("https", "http").rstrip("/") + "/"
):
# allowed to cross http <-> https
out.add(l)
else:
if l not in ["..", "../"]:
# Ignore FTP-like "parent"
out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
if not out and url.endswith("/"):
return await self._ls(url.rstrip("/"), detail=True)
if detail:
return [
{
"name": u,
"size": None,
"type": "directory" if u.endswith("/") else "file",
}
for u in out
]
else:
return list(sorted(out))
async def _cat_file(self, url, start=None, end=None, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
logger.debug(url)
if (start is None) ^ (end is None):
raise ValueError("Give start and end or neither")
if start is not None:
headers = kw.pop("headers", {}).copy()
headers["Range"] = "bytes=%i-%i" % (start, end - 1)
kw["headers"] = headers
async with self.session.get(url, **kw) as r:
if r.status == 404:
raise FileNotFoundError(url)
r.raise_for_status()
out = await r.read()
return out
async def _get_file(self, rpath, lpath, chunk_size=5 * 2 ** 20, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
logger.debug(rpath)
async with self.session.get(rpath, **self.kwargs) as r:
if r.status == 404:
raise FileNotFoundError(rpath)
r.raise_for_status()
with open(lpath, "wb") as fd:
chunk = True
while chunk:
chunk = await r.content.read(chunk_size)
fd.write(chunk)
async def _exists(self, path, **kwargs):
kw = self.kwargs.copy()
kw.update(kwargs)
try:
logger.debug(path)
r = await self.session.get(path, **kw)
async with r:
return r.status < 400
except (requests.HTTPError, aiohttp.client_exceptions.ClientError):
return False
async def _isfile(self, path, **kwargs):
return await self._exists(path, **kwargs)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=None, # XXX: This differs from the base class.
cache_type=None,
cache_options=None,
**kwargs
):
"""Make a file-like object
Parameters
----------
path: str
Full URL with protocol
mode: string
must be "rb"
block_size: int or None
Bytes to download in one request; use instance value if None. If
zero, will return a streaming Requests file-like instance.
kwargs: key-value
Any other parameters, passed to requests calls
"""
if mode != "rb":
raise NotImplementedError
block_size = block_size if block_size is not None else self.block_size
kw = self.kwargs.copy()
kw["asynchronous"] = self.asynchronous
kw.update(kwargs)
size = self.size(path)
if block_size and size:
return HTTPFile(
self,
path,
session=self.session,
block_size=block_size,
mode=mode,
size=size,
cache_type=cache_type or self.cache_type,
cache_options=cache_options or self.cache_options,
loop=self.loop,
**kw
)
else:
return HTTPStreamFile(
self, path, mode=mode, loop=self.loop, session=self.session, **kw
)
def ukey(self, url):
"""Unique identifier; assume HTTP files are static, unchanging"""
return tokenize(url, self.kwargs, self.protocol)
async def _info(self, url, **kwargs):
"""Get info of URL
Tries to access location via HEAD, and then GET methods, but does
not fetch the data.
It is possible that the server does not supply any size information, in
which case size will be given as None (and certain operations on the
corresponding file will not work).
"""
size = False
for policy in ["head", "get"]:
try:
size = await _file_size(
url, size_policy=policy, session=self.session, **self.kwargs
)
if size:
break
except Exception:
pass
else:
# get failed, so conclude URL does not exist
if size is False:
raise FileNotFoundError(url)
return {"name": url, "size": size or None, "type": "file"}
def isdir(self, path):
# override, since all URLs are (also) files
return bool(self.ls(path))
class HTTPFile(AbstractBufferedFile):
"""
    A file-like object pointing to a remote HTTP(S) resource
    Supports only reading, with read-ahead of a predetermined block-size.
In the case that the server does not supply the filesize, only reading of
the complete file in one go is supported.
Parameters
----------
url: str
Full URL of the remote resource, including the protocol
session: requests.Session or None
All calls will be made within this session, to avoid restarting
connections where the server allows this
block_size: int or None
The amount of read-ahead to do, in bytes. Default is 5MB, or the value
configured for the FileSystem creating this file
size: None or int
If given, this is the size of the file in bytes, and we don't attempt
to call the server to find the value.
kwargs: all other key-values are passed to requests calls.
"""
def __init__(
self,
fs,
url,
session=None,
block_size=None,
mode="rb",
cache_type="bytes",
cache_options=None,
size=None,
loop=None,
asynchronous=False,
**kwargs
):
if mode != "rb":
raise NotImplementedError("File mode not supported")
self.asynchronous = asynchronous
self.url = url
self.session = session
self.details = {"name": url, "size": size, "type": "file"}
super().__init__(
fs=fs,
path=url,
mode=mode,
block_size=block_size,
cache_type=cache_type,
cache_options=cache_options,
**kwargs
)
self.loop = loop
def read(self, length=-1):
"""Read bytes from file
Parameters
----------
length: int
Read up to this many bytes. If negative, read all content to end of
file. If the server has not supplied the filesize, attempting to
read only part of the data will raise a ValueError.
"""
if (
(length < 0 and self.loc == 0)
or (length > (self.size or length)) # explicit read all
or ( # read more than there is
self.size and self.size < self.blocksize
) # all fits in one block anyway
):
self._fetch_all()
if self.size is None:
if length < 0:
self._fetch_all()
else:
length = min(self.size - self.loc, length)
return super().read(length)
async def async_fetch_all(self):
"""Read whole file in one shot, without caching
This is only called when position is still at zero,
and read() is called without a byte-count.
"""
if not isinstance(self.cache, AllBytes):
r = await self.session.get(self.url, **self.kwargs)
async with r:
r.raise_for_status()
out = await r.read()
self.cache = AllBytes(
size=len(out), fetcher=None, blocksize=None, data=out
)
self.size = len(out)
_fetch_all = sync_wrapper(async_fetch_all)
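    # sync_wrapper exposes the coroutine above as a blocking method that runs it on
    # the filesystem's event loop.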
async def async_fetch_range(self, start, end):
"""Download a block of data
The expectation is that the server returns only the requested bytes,
with HTTP code 206. If this is not the case, we first check the headers,
and then stream the output - if the data size is bigger than we
requested, an exception is raised.
"""
kwargs = self.kwargs.copy()
headers = kwargs.pop("headers", {}).copy()
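        # The HTTP Range header is inclusive at both ends, so request bytes
        # start..end-1 to receive exactly end - start bytes.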
headers["Range"] = "bytes=%i-%i" % (start, end - 1)
logger.debug(self.url + " : " + headers["Range"])
r = await self.session.get(self.url, headers=headers, **kwargs)
async with r:
if r.status == 416:
# range request outside file
return b""
r.raise_for_status()
if r.status == 206:
# partial content, as expected
out = await r.read()
elif "Content-Length" in r.headers:
cl = int(r.headers["Content-Length"])
if cl <= end - start:
# data size OK
out = await r.read()
else:
raise ValueError(
"Got more bytes (%i) than requested (%i)" % (cl, end - start)
)
else:
cl = 0
out = []
while True:
chunk = await r.content.read(2 ** 20)
# data size unknown, let's see if it goes too big
if chunk:
out.append(chunk)
cl += len(chunk)
if cl > end - start:
raise ValueError(
"Got more bytes so far (>%i) than requested (%i)"
% (cl, end - start)
)
else:
break
out = b"".join(out)
return out
_fetch_range = sync_wrapper(async_fetch_range)
def close(self):
pass
async def get(session, url, **kwargs):
return await session.get(url, **kwargs)
class HTTPStreamFile(AbstractBufferedFile):
def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs):
self.asynchronous = kwargs.pop("asynchronous", False)
self.url = url
self.loop = loop
self.session = session
if mode != "rb":
raise ValueError
self.details = {"name": url, "size": None}
super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs)
self.r = sync(self.loop, get, self.session, url, **kwargs)
def seek(self, *args, **kwargs):
raise ValueError("Cannot seek streaming HTTP file")
async def _read(self, num=-1):
out = await self.r.content.read(num)
self.loc += len(out)
return out
read = sync_wrapper(_read)
async def _close(self):
self.r.close()
def close(self):
asyncio.run_coroutine_threadsafe(self._close(), self.loop)
async def get_range(session, url, start, end, file=None, **kwargs):
    # explicitly get a range when we know it must be safe
kwargs = kwargs.copy()
headers = kwargs.pop("headers", {}).copy()
headers["Range"] = "bytes=%i-%i" % (start, end - 1)
r = await session.get(url, headers=headers, **kwargs)
r.raise_for_status()
async with r:
out = await r.read()
if file:
with open(file, "rb+") as f:
f.seek(start)
f.write(out)
else:
return out
async def _file_size(url, session=None, size_policy="head", **kwargs):
"""Call HEAD on the server to get file size
Default operation is to explicitly allow redirects and use encoding
'identity' (no compression) to get the true size of the target.
"""
kwargs = kwargs.copy()
ar = kwargs.pop("allow_redirects", True)
head = kwargs.get("headers", {}).copy()
head["Accept-Encoding"] = "identity"
session = session or await get_client()
if size_policy == "head":
r = await session.head(url, allow_redirects=ar, **kwargs)
elif size_policy == "get":
r = await session.get(url, allow_redirects=ar, **kwargs)
else:
raise TypeError('size_policy must be "head" or "get", got %s' "" % size_policy)
async with r:
if "Content-Length" in r.headers:
return int(r.headers["Content-Length"])
elif "Content-Range" in r.headers:
return int(r.headers["Content-Range"].split("/")[1])
file_size = sync_wrapper(_file_size)
| 34.356725 | 87 | 0.551149 |
1f3fff5e89cf44374706ee8514bef430683a2a5e | 15,408 | py | Python | test/TestUpdateEventName.py | indera/redi | c2a41ce6e5b613f75eaa8e6426ff948e0bfab349 | [
"BSD-3-Clause"
] | null | null | null | test/TestUpdateEventName.py | indera/redi | c2a41ce6e5b613f75eaa8e6426ff948e0bfab349 | [
"BSD-3-Clause"
] | null | null | null | test/TestUpdateEventName.py | indera/redi | c2a41ce6e5b613f75eaa8e6426ff948e0bfab349 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from lxml import etree
import os
from redi import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestUpdateEventName(unittest.TestCase):
def setUp(self):
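        # Fixture input: sorted lab results already tagged with redcapFormName; the
        # empty eventName elements are what the code under test should fill in.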
self.sortedData = """
<study>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>07/29/17</Collection_Date>
<Collection_Time>11:00</Collection_Time>
<Component_Name>WHITE BLOOD CELL COUNT</Component_Name>
<loinc_code>1577876</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>8.7</Result_Value>
<timestamp>1906-03-15 11:00</timestamp><redcapFormName>cbc</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>07/29/17</Collection_Date>
<Collection_Time>11:00</Collection_Time>
<Component_Name>HEMOGLOBIN</Component_Name>
<loinc_code>1534435</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>11.3</Result_Value>
<timestamp>1906-03-15 11:00</timestamp><redcapFormName>cbc</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>12:00</Collection_Time>
<Component_Name>WHITE BLOOD CELL COUNT</Component_Name>
<loinc_code>1577876</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>8.7</Result_Value>
<timestamp>1903-04-16 12:00</timestamp><redcapFormName>cbc</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>12:00</Collection_Time>
<Component_Name>HEMOGLOBIN</Component_Name>
<loinc_code>1534435</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>11.3</Result_Value>
<timestamp>1903-04-16 12:00</timestamp><redcapFormName>cbc</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>12:38</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.8</Result_Value>
<timestamp>1908-07-01 12:38</timestamp><redcapFormName>chemistry</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>16:01</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.9</Result_Value>
<timestamp>1908-07-01 16:01</timestamp><redcapFormName>chemistry</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>13:50</Collection_Time>
<Component_Name>HEMATOCRIT</Component_Name>
<loinc_code>1534436</loinc_code>
<Reference_Unit>%</Reference_Unit>
<Result_Value>34.5</Result_Value>
<timestamp>1903-04-16 13:50</timestamp><redcapFormName>undefined</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>07/29/17</Collection_Date>
<Collection_Time>11:00</Collection_Time>
<Component_Name>WHITE BLOOD CELL COUNT</Component_Name>
<loinc_code>1577876</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>8.7</Result_Value>
<timestamp>1906-03-15 11:00</timestamp><redcapFormName>cbc</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>13:50</Collection_Time>
<Component_Name>HEMOGLOBIN</Component_Name>
<loinc_code>1534435</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>11.3</Result_Value>
<timestamp>1903-04-16 13:50</timestamp><redcapFormName>cbc</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>12:38</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.8</Result_Value>
<timestamp>1908-07-01 12:38</timestamp><redcapFormName>chemistry</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>16:01</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.9</Result_Value>
<timestamp>1908-07-01 16:01</timestamp><redcapFormName>chemistry</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>13:50</Collection_Time>
<Component_Name>HEMATOCRIT</Component_Name>
<loinc_code>1534436</loinc_code>
<Reference_Unit>%</Reference_Unit>
<Result_Value>34.5</Result_Value>
<timestamp>1903-04-16 13:50</timestamp><redcapFormName>undefined</redcapFormName><eventName/><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
</study>"""
self.data = etree.ElementTree(etree.fromstring(self.sortedData))
self.form_events = """<?xml version='1.0' encoding='US-ASCII'?>
<redcapProject>
<name>My Test Project</name>
<form>
<name>cbc</name>
<formDateField>cbc_lbdtc</formDateField>
<formCompletedFieldName>cbc_complete</formCompletedFieldName>
<event>
<name>1_arm_1</name>
</event>
<event>
<name>2_arm_1</name>
</event>
<event>
<name>3_arm_1</name>
</event>
</form>
<form>
<name>chemistry</name>
<formDateField>chemistry_lbdtc</formDateField>
<formCompletedFieldName>chemistry_complete</formCompletedFieldName>
<event>
<name>1_arm_1</name>
</event>
<event>
<name>2_arm_1</name>
</event>
<event>
<name>3_arm_1</name>
</event>
</form>
</redcapProject>
"""
self.form_events_tree = etree.ElementTree(etree.fromstring(self.form_events))
self.output = """<study>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>07/29/17</Collection_Date>
<Collection_Time>11:00</Collection_Time>
<Component_Name>WHITE BLOOD CELL COUNT</Component_Name>
<loinc_code>1577876</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>8.7</Result_Value>
<timestamp>1906-03-15 11:00</timestamp><redcapFormName>cbc</redcapFormName><eventName>1_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>07/29/17</Collection_Date>
<Collection_Time>11:00</Collection_Time>
<Component_Name>HEMOGLOBIN</Component_Name>
<loinc_code>1534435</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>11.3</Result_Value>
<timestamp>1906-03-15 11:00</timestamp><redcapFormName>cbc</redcapFormName><eventName>1_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>12:00</Collection_Time>
<Component_Name>WHITE BLOOD CELL COUNT</Component_Name>
<loinc_code>1577876</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>8.7</Result_Value>
<timestamp>1903-04-16 12:00</timestamp><redcapFormName>cbc</redcapFormName><eventName>2_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>12:00</Collection_Time>
<Component_Name>HEMOGLOBIN</Component_Name>
<loinc_code>1534435</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>11.3</Result_Value>
<timestamp>1903-04-16 12:00</timestamp><redcapFormName>cbc</redcapFormName><eventName>2_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>12:38</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.8</Result_Value>
<timestamp>1908-07-01 12:38</timestamp><redcapFormName>chemistry</redcapFormName><eventName>1_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>16:01</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.9</Result_Value>
<timestamp>1908-07-01 16:01</timestamp><redcapFormName>chemistry</redcapFormName><eventName>2_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>11</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>13:50</Collection_Time>
<Component_Name>HEMATOCRIT</Component_Name>
<loinc_code>1534436</loinc_code>
<Reference_Unit>%</Reference_Unit>
<Result_Value>34.5</Result_Value>
<timestamp>1903-04-16 13:50</timestamp><redcapFormName>undefined</redcapFormName><eventName>undefined</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>07/29/17</Collection_Date>
<Collection_Time>11:00</Collection_Time>
<Component_Name>WHITE BLOOD CELL COUNT</Component_Name>
<loinc_code>1577876</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>8.7</Result_Value>
<timestamp>1906-03-15 11:00</timestamp><redcapFormName>cbc</redcapFormName><eventName>1_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>13:50</Collection_Time>
<Component_Name>HEMOGLOBIN</Component_Name>
<loinc_code>1534435</loinc_code>
<Reference_Unit>g/dL</Reference_Unit>
<Result_Value>11.3</Result_Value>
<timestamp>1903-04-16 13:50</timestamp><redcapFormName>cbc</redcapFormName><eventName>2_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>12:38</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.8</Result_Value>
<timestamp>1908-07-01 12:38</timestamp><redcapFormName>chemistry</redcapFormName><eventName>1_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>10/12/18</Collection_Date>
<Collection_Time>16:01</Collection_Time>
<Component_Name>BILIRUBIN DIRECT</Component_Name>
<loinc_code>1558221</loinc_code>
<Reference_Unit>mg/dL</Reference_Unit>
<Result_Value>0.9</Result_Value>
<timestamp>1908-07-01 16:01</timestamp><redcapFormName>chemistry</redcapFormName><eventName>2_arm_1</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
<subject>
<STUDY_ID>22</STUDY_ID>
<Collection_Date>05/20/20</Collection_Date>
<Collection_Time>13:50</Collection_Time>
<Component_Name>HEMATOCRIT</Component_Name>
<loinc_code>1534436</loinc_code>
<Reference_Unit>%</Reference_Unit>
<Result_Value>34.5</Result_Value>
<timestamp>1903-04-16 13:50</timestamp><redcapFormName>undefined</redcapFormName><eventName>undefined</eventName><formDateField/><formCompletedFieldName/><timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/></subject>
</study>"""
self.expect = etree.tostring(etree.fromstring(self.output))
def test_update_event_name(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
redi.update_event_name(self.data, self.form_events_tree, 'undefined')
result = etree.tostring(self.data)
self.assertEqual(self.expect, result)
def tearDown(self):
return()
if __name__ == "__main__":
unittest.main()
| 53.314879 | 250 | 0.716446 |
f9c59590c3febb4fec99628bf4a99fa996524c9c | 1,490 | py | Python | allauth/socialaccount/providers/stackexchange/tests.py | Stikerz/Film-Guide | 5c0a7a6f8c530e72bef42fcbac6a52665834c4e0 | [
"BSD-3-Clause"
] | null | null | null | allauth/socialaccount/providers/stackexchange/tests.py | Stikerz/Film-Guide | 5c0a7a6f8c530e72bef42fcbac6a52665834c4e0 | [
"BSD-3-Clause"
] | null | null | null | allauth/socialaccount/providers/stackexchange/tests.py | Stikerz/Film-Guide | 5c0a7a6f8c530e72bef42fcbac6a52665834c4e0 | [
"BSD-3-Clause"
] | 1 | 2022-02-01T17:19:28.000Z | 2022-02-01T17:19:28.000Z | from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import StackExchangeProvider
class StackExchangeTests(OAuth2TestsMixin, TestCase):
provider_id = StackExchangeProvider.id
def get_mocked_response(self):
return MockedResponse(200, """
{
"has_more": false,
"items": [
{
"is_employee": false,
"last_access_date": 1356200390,
"display_name": "pennersr",
"account_id": 291652,
"badge_counts": {
"bronze": 2,
"silver": 2,
"gold": 0
},
"last_modified_date": 1356199552,
"profile_image": "http://www.gravatar.com/avatar/053d648486d567d3143d6bad8df8cfeb?d=identicon&r=PG",
"user_type": "registered",
"creation_date": 1296223711,
"reputation_change_quarter": 148,
"reputation_change_year": 378,
"reputation": 504,
"link": "http://stackoverflow.com/users/593944/pennersr",
"reputation_change_week": 0,
"user_id": 593944,
"reputation_change_month": 10,
"reputation_change_day": 0
}
],
"quota_max": 10000,
"quota_remaining": 9999
}""") # noqa
| 35.47619 | 117 | 0.518121 |
b2079628e10327e8281e31f83133d20fe29879ae | 203 | py | Python | XSS/lib/EstableSession.py | Vishalcc1/XSS | 0c61c680b1c00c1ded7e803b8c3966d16e6b7f1f | [
"MIT"
] | null | null | null | XSS/lib/EstableSession.py | Vishalcc1/XSS | 0c61c680b1c00c1ded7e803b8c3966d16e6b7f1f | [
"MIT"
] | null | null | null | XSS/lib/EstableSession.py | Vishalcc1/XSS | 0c61c680b1c00c1ded7e803b8c3966d16e6b7f1f | [
"MIT"
] | null | null | null | import requests
import json
def estableSession(args):
    """Build a requests.Session configured with the proxy, headers and cookies taken from args."""
    req = requests.Session()
    req.proxies = args.proxy
    req.headers = args.header
    req.cookies.update(json.loads(args.cookie))
    return req
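# Hypothetical usage sketch (not in the original file): estableSession only needs an
# object exposing .proxy, .header and .cookie, so a SimpleNamespace stands in for the
# real argparse namespace here; all values are placeholders.
if __name__ == "__main__":
    from types import SimpleNamespace
    fake_args = SimpleNamespace(
        proxy={},                                  # e.g. {"http": "http://127.0.0.1:8080"}
        header={"User-Agent": "xss-session-demo"},
        cookie='{"session": "placeholder"}',
    )
    session = estableSession(fake_args)
    print(session.headers, session.cookies.get_dict())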
| 18.454545 | 47 | 0.719212 |
8c926e7bd8aee2adc037a5687cc150fb7b28d525 | 6,072 | py | Python | python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/solids.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 1 | 2021-07-03T09:05:58.000Z | 2021-07-03T09:05:58.000Z | python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/solids.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 1 | 2021-06-21T18:30:02.000Z | 2021-06-25T21:18:39.000Z | python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/solids.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 1 | 2021-11-30T21:40:46.000Z | 2021-11-30T21:40:46.000Z | import hashlib
from dagster import InputDefinition, List, Nothing, OutputDefinition, check, solid
from dagster_pandas import DataFrame
from google.cloud.bigquery.job import LoadJobConfig, QueryJobConfig
from google.cloud.bigquery.table import EncryptionConfiguration, TimePartitioning
from .configs import (
define_bigquery_create_dataset_config,
define_bigquery_delete_dataset_config,
define_bigquery_load_config,
define_bigquery_query_config,
)
from .types import BigQueryLoadSource
_START = "start"
def _preprocess_config(cfg):
destination_encryption_configuration = cfg.get("destination_encryption_configuration")
time_partitioning = cfg.get("time_partitioning")
if destination_encryption_configuration is not None:
cfg["destination_encryption_configuration"] = EncryptionConfiguration(
kms_key_name=destination_encryption_configuration
)
if time_partitioning is not None:
cfg["time_partitioning"] = TimePartitioning(**time_partitioning)
return cfg
def bq_solid_for_queries(sql_queries):
"""
Executes BigQuery SQL queries.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
sql_queries = check.list_param(sql_queries, "sql queries", of_type=str)
m = hashlib.sha1()
for query in sql_queries:
m.update(query.encode("utf-8"))
name = "bq_solid_{hash}".format(hash=m.hexdigest()[:10])
@solid(
name=name,
input_defs=[InputDefinition(_START, Nothing)],
output_defs=[OutputDefinition(List[DataFrame])],
config_schema=define_bigquery_query_config(),
required_resource_keys={"bigquery"},
tags={"kind": "sql", "sql": "\n".join(sql_queries)},
)
def _solid(context): # pylint: disable=unused-argument
query_job_config = _preprocess_config(context.solid_config.get("query_job_config", {}))
# Retrieve results as pandas DataFrames
results = []
for sql_query in sql_queries:
# We need to construct a new QueryJobConfig for each query.
# See: https://bit.ly/2VjD6sl
cfg = QueryJobConfig(**query_job_config) if query_job_config else None
context.log.info(
"executing query %s with config: %s"
% (sql_query, cfg.to_api_repr() if cfg else "(no config provided)")
)
results.append(
context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()
)
return results
return _solid
BIGQUERY_LOAD_CONFIG = define_bigquery_load_config()
@solid(
input_defs=[InputDefinition("paths", List[str])],
output_defs=[OutputDefinition(Nothing)],
config_schema=BIGQUERY_LOAD_CONFIG,
required_resource_keys={"bigquery"},
)
def import_gcs_paths_to_bq(context, paths):
return _execute_load_in_source(context, paths, BigQueryLoadSource.GCS)
@solid(
input_defs=[InputDefinition("df", DataFrame)],
output_defs=[OutputDefinition(Nothing)],
config_schema=BIGQUERY_LOAD_CONFIG,
required_resource_keys={"bigquery"},
)
def import_df_to_bq(context, df):
return _execute_load_in_source(context, df, BigQueryLoadSource.DataFrame)
@solid(
input_defs=[InputDefinition("path", str)],
output_defs=[OutputDefinition(Nothing)],
config_schema=BIGQUERY_LOAD_CONFIG,
required_resource_keys={"bigquery"},
)
def import_file_to_bq(context, path):
return _execute_load_in_source(context, path, BigQueryLoadSource.File)
def _execute_load_in_source(context, source, source_name):
destination = context.solid_config.get("destination")
load_job_config = _preprocess_config(context.solid_config.get("load_job_config", {}))
cfg = LoadJobConfig(**load_job_config) if load_job_config else None
context.log.info(
"executing BQ load with config: %s for source %s"
% (cfg.to_api_repr() if cfg else "(no config provided)", source)
)
if source_name == BigQueryLoadSource.DataFrame:
context.resources.bigquery.load_table_from_dataframe(
source, destination, job_config=cfg
).result()
# Load from file. See: https://cloud.google.com/bigquery/docs/loading-data-local
elif source_name == BigQueryLoadSource.File:
with open(source, "rb") as file_obj:
context.resources.bigquery.load_table_from_file(
file_obj, destination, job_config=cfg
).result()
# Load from GCS. See: https://cloud.google.com/bigquery/docs/loading-data-cloud-storage
elif source_name == BigQueryLoadSource.GCS:
context.resources.bigquery.load_table_from_uri(source, destination, job_config=cfg).result()
@solid(
input_defs=[InputDefinition(_START, Nothing)],
config_schema=define_bigquery_create_dataset_config(),
required_resource_keys={"bigquery"},
)
def bq_create_dataset(context):
"""BigQuery Create Dataset.
This solid encapsulates creating a BigQuery dataset.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
(dataset, exists_ok) = [context.solid_config.get(k) for k in ("dataset", "exists_ok")]
context.log.info("executing BQ create_dataset for dataset %s" % (dataset))
context.resources.bigquery.create_dataset(dataset, exists_ok)
@solid(
input_defs=[InputDefinition(_START, Nothing)],
config_schema=define_bigquery_delete_dataset_config(),
required_resource_keys={"bigquery"},
)
def bq_delete_dataset(context):
"""BigQuery Delete Dataset.
This solid encapsulates deleting a BigQuery dataset.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
(dataset, delete_contents, not_found_ok) = [
context.solid_config.get(k) for k in ("dataset", "delete_contents", "not_found_ok")
]
context.log.info("executing BQ delete_dataset for dataset %s" % dataset)
context.resources.bigquery.delete_dataset(
dataset, delete_contents=delete_contents, not_found_ok=not_found_ok
)
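# Hypothetical usage sketch (not part of the original module): building the solid
# definition is purely local -- no BigQuery client is needed until it executes inside a
# pipeline that provides the `bigquery` resource. The SQL strings are placeholders.
if __name__ == "__main__":
    example_solid = bq_solid_for_queries(["SELECT 1 AS one", "SELECT 2 AS two"])
    # The generated definition carries a hash-derived name such as "bq_solid_<10 hex chars>".
    print(example_solid.name)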
| 34.305085 | 100 | 0.717556 |
8037ab5d876346414b5e6e46b3c19e6a65d60a7f | 2,794 | py | Python | tests/ping.py | joaoantoniopereira/Boardfarm | fa00a2628c5924ca13e05c6ba4990627038b9f1f | [
"BSD-3-Clause-Clear"
] | 74 | 2015-08-25T22:50:38.000Z | 2022-01-04T16:32:00.000Z | tests/ping.py | joaoantoniopereira/Boardfarm | fa00a2628c5924ca13e05c6ba4990627038b9f1f | [
"BSD-3-Clause-Clear"
] | 72 | 2015-10-12T17:42:47.000Z | 2019-01-19T14:11:18.000Z | tests/ping.py | joaoantoniopereira/Boardfarm | fa00a2628c5924ca13e05c6ba4990627038b9f1f | [
"BSD-3-Clause-Clear"
] | 50 | 2015-08-25T22:45:44.000Z | 2022-01-05T09:47:04.000Z | # Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
import rootfs_boot
import lib
from devices import board, wan, lan, wlan, prompt
class RouterPingWanDev(rootfs_boot.RootFSBootTest):
'''Router can ping device through WAN interface.'''
def runTest(self):
if not wan:
msg = 'No WAN Device defined, skipping ping WAN test.'
lib.common.test_msg(msg)
self.skipTest(msg)
board.sendline('\nping -c5 192.168.0.1')
board.expect('5 packets received', timeout=10)
board.expect(prompt)
def recover(self):
board.sendcontrol('c')
class RouterPingInternet(rootfs_boot.RootFSBootTest):
'''Router can ping internet address by IP.'''
def runTest(self):
board.sendline('\nping -c2 8.8.8.8')
board.expect('2 packets received', timeout=10)
board.expect(prompt)
class RouterPingInternetName(rootfs_boot.RootFSBootTest):
'''Router can ping internet address by name.'''
def runTest(self):
board.sendline('\nping -c2 www.google.com')
board.expect('2 packets received', timeout=10)
board.expect(prompt)
class LanDevPingRouter(rootfs_boot.RootFSBootTest):
'''Device on LAN can ping router.'''
def runTest(self):
if not lan:
msg = 'No LAN Device defined, skipping ping test from LAN.'
lib.common.test_msg(msg)
self.skipTest(msg)
lan.sendline('\nping -i 0.2 -c 5 192.168.1.1')
lan.expect('PING ')
lan.expect('5 received', timeout=15)
lan.expect(prompt)
class LanDevPingWanDev(rootfs_boot.RootFSBootTest):
'''Device on LAN can ping through router.'''
def runTest(self):
if not lan:
msg = 'No LAN Device defined, skipping ping test from LAN.'
lib.common.test_msg(msg)
self.skipTest(msg)
if not wan:
msg = 'No WAN Device defined, skipping ping WAN test.'
lib.common.test_msg(msg)
self.skipTest(msg)
lan.sendline('\nping -i 0.2 -c 5 192.168.0.1')
lan.expect('PING ')
lan.expect('5 received', timeout=15)
lan.expect(prompt)
def recover(self):
lan.sendcontrol('c')
class LanDevPingInternet(rootfs_boot.RootFSBootTest):
'''Device on LAN can ping through router to internet.'''
def runTest(self):
if not lan:
msg = 'No LAN Device defined, skipping ping test from LAN.'
lib.common.test_msg(msg)
self.skipTest(msg)
lan.sendline('\nping -c2 8.8.8.8')
lan.expect('2 received', timeout=10)
lan.expect(prompt)
def recover(self):
lan.sendcontrol('c')
| 34.493827 | 71 | 0.625626 |
b16f82b2a06030af06945d2e2540905e65929836 | 10,500 | py | Python | neutron/cmd/netns_cleanup.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | neutron/cmd/netns_cleanup.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | neutron/cmd/netns_cleanup.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import re
import signal
import time
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from neutron._i18n import _LE, _LW
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config
from neutron.conf.agent import cmd
from neutron.conf.agent import common as agent_config
from neutron.conf.agent import dhcp as dhcp_config
LOG = logging.getLogger(__name__)
LB_NS_PREFIX = 'qlbaas-'
NS_PREFIXES = {
'dhcp': [dhcp.NS_PREFIX],
'l3': [namespaces.NS_PREFIX, dvr_snat_ns.SNAT_NS_PREFIX,
dvr_fip_ns.FIP_NS_PREFIX],
'lbaas': [LB_NS_PREFIX],
}
SIGTERM_WAITTIME = 10
NETSTAT_PIDS_REGEX = re.compile(r'.* (?P<pid>\d{2,6})/.*')
class PidsInNamespaceException(Exception):
pass
class FakeDhcpPlugin(object):
"""Fake RPC plugin to bypass any RPC calls."""
def __getattribute__(self, name):
def fake_method(*args):
pass
return fake_method
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
conf = cfg.CONF
cmd.register_cmd_opts(cmd.netns_opts, conf)
agent_config.register_interface_driver_opts_helper(conf)
dhcp_config.register_agent_dhcp_opts(conf)
conf.register_opts(interface.OPTS)
return conf
def _get_dhcp_process_monitor(config):
return external_process.ProcessMonitor(config=config,
resource_type='dhcp')
def kill_dhcp(conf, namespace):
"""Disable DHCP for a network if DHCP is still active."""
network_id = namespace.replace(dhcp.NS_PREFIX, '')
dhcp_driver = importutils.import_object(
conf.dhcp_driver,
conf=conf,
process_monitor=_get_dhcp_process_monitor(conf),
network=dhcp.NetModel({'id': network_id}),
plugin=FakeDhcpPlugin())
if dhcp_driver.active:
dhcp_driver.disable()
def eligible_for_deletion(conf, namespace, force=False):
"""Determine whether a namespace is eligible for deletion.
Eligibility is determined by having only the lo device or if force
is passed as a parameter.
"""
if conf.agent_type:
prefixes = NS_PREFIXES.get(conf.agent_type)
else:
prefixes = itertools.chain(*NS_PREFIXES.values())
ns_mangling_pattern = '(%s%s)' % ('|'.join(prefixes),
constants.UUID_PATTERN)
# filter out namespaces without UUID as the name
if not re.match(ns_mangling_pattern, namespace):
return False
ip = ip_lib.IPWrapper(namespace=namespace)
return force or ip.namespace_is_empty()
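# Illustration (an assumption, not part of the original module): with the default
# prefixes the name filter above reduces to a regex of the form
# r"(qdhcp-|qrouter-|snat-|fip-|qlbaas-)<UUID>", so a namespace such as
# "qrouter-0a1b2c3d-1111-2222-3333-444455556666" proceeds to the emptiness check,
# while an arbitrary name like "test-ns" is rejected immediately.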
def unplug_device(conf, device):
orig_log_fail_as_error = device.get_log_fail_as_error()
device.set_log_fail_as_error(False)
try:
device.link.delete()
except RuntimeError:
device.set_log_fail_as_error(orig_log_fail_as_error)
# Maybe the device is OVS port, so try to delete
ovs = ovs_lib.BaseOVS()
bridge_name = ovs.get_bridge_for_iface(device.name)
if bridge_name:
bridge = ovs_lib.OVSBridge(bridge_name)
bridge.delete_port(device.name)
else:
LOG.debug('Unable to find bridge for device: %s', device.name)
finally:
device.set_log_fail_as_error(orig_log_fail_as_error)
def find_listen_pids_namespace(namespace):
"""Retrieve a list of pids of listening processes within the given netns.
    It executes netstat -nlp and returns the set of unique pids it finds listening.
"""
ip = ip_lib.IPWrapper(namespace=namespace)
pids = set()
cmd = ['netstat', '-nlp']
output = ip.netns.execute(cmd, run_as_root=True)
for line in output.splitlines():
m = NETSTAT_PIDS_REGEX.match(line)
if m:
pids.add(m.group('pid'))
return pids
def wait_until_no_listen_pids_namespace(namespace, timeout=SIGTERM_WAITTIME):
"""Poll listening processes within the given namespace.
If after timeout seconds, there are remaining processes in the namespace,
then a PidsInNamespaceException will be thrown.
"""
# NOTE(dalvarez): This function can block forever if
# find_listen_pids_in_namespace never returns which is really unlikely. We
# can't use wait_until_true because we might get interrupted by eventlet
# Timeout during our I/O with rootwrap daemon and that will lead to errors
# in subsequent calls to utils.execute grabbing always the output of the
# previous command
start = end = time.time()
while end - start < timeout:
if not find_listen_pids_namespace(namespace):
return
time.sleep(1)
end = time.time()
raise PidsInNamespaceException
def _kill_listen_processes(namespace, force=False):
"""Identify all listening processes within the given namespace.
Then, for each one, find its top parent with same cmdline (in case this
process forked) and issue a SIGTERM to all of them. If force is True,
then a SIGKILL will be issued to all parents and all their children. Also,
this function returns the number of listening processes.
"""
pids = find_listen_pids_namespace(namespace)
pids_to_kill = {utils.find_fork_top_parent(pid) for pid in pids}
kill_signal = signal.SIGTERM
if force:
kill_signal = signal.SIGKILL
children = [utils.find_child_pids(pid, True) for pid in pids_to_kill]
pids_to_kill.update(itertools.chain.from_iterable(children))
for pid in pids_to_kill:
# Throw a warning since this particular cleanup may need a specific
# implementation in the right module. Ideally, netns_cleanup wouldn't
# kill any processes as the responsible module should've killed them
# before cleaning up the namespace
LOG.warning(_LW("Killing (%(signal)d) [%(pid)s] %(cmdline)s"),
{'signal': kill_signal,
'pid': pid,
'cmdline': ' '.join(utils.get_cmdline_from_pid(pid))[:80]
})
try:
utils.kill_process(pid, kill_signal, run_as_root=True)
except Exception as ex:
LOG.error(_LE('An error occurred while killing '
'[%(pid)s]: %(msg)s'), {'pid': pid, 'msg': ex})
return len(pids)
def kill_listen_processes(namespace):
"""Kill all processes listening within the given namespace.
First it tries to kill them using SIGTERM, waits until they die gracefully
and then kills remaining processes (if any) with SIGKILL
"""
if _kill_listen_processes(namespace, force=False):
try:
wait_until_no_listen_pids_namespace(namespace)
except PidsInNamespaceException:
_kill_listen_processes(namespace, force=True)
# Allow some time for remaining processes to die
wait_until_no_listen_pids_namespace(namespace)
def destroy_namespace(conf, namespace, force=False):
"""Destroy a given namespace.
If force is True, then dhcp (if it exists) will be disabled and all
devices will be forcibly removed.
"""
try:
ip = ip_lib.IPWrapper(namespace=namespace)
if force:
kill_dhcp(conf, namespace)
# NOTE: The dhcp driver will remove the namespace if is it empty,
# so a second check is required here.
if ip.netns.exists(namespace):
try:
kill_listen_processes(namespace)
except PidsInNamespaceException:
# This is unlikely since, at this point, we have SIGKILLed
# all remaining processes but if there are still some, log
# the error and continue with the cleanup
LOG.error(_LE('Not all processes were killed in %s'),
namespace)
for device in ip.get_devices():
unplug_device(conf, device)
ip.garbage_collect_namespace()
except Exception:
LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
def cleanup_network_namespaces(conf):
# Identify namespaces that are candidates for deletion.
candidates = [ns for ns in
ip_lib.IPWrapper.get_namespaces()
if eligible_for_deletion(conf, ns, conf.force)]
if candidates:
time.sleep(2)
for namespace in candidates:
destroy_namespace(conf, namespace, conf.force)
def main():
"""Main method for cleaning up network namespaces.
This method will make two passes checking for namespaces to delete. The
process will identify candidates, sleep, and call garbage collect. The
garbage collection will re-verify that the namespace meets the criteria for
deletion (ie it is empty). The period of sleep and the 2nd pass allow
time for the namespace state to settle, so that the check prior deletion
will re-confirm the namespace is empty.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --force flag should only be used as part of the cleanup of a devstack
installation as it will blindly purge namespaces and their devices. This
option also kills any lingering DHCP instances.
"""
conf = setup_conf()
conf()
config.setup_logging()
cleanup_network_namespaces(conf)
| 35.836177 | 79 | 0.685238 |
70316535e1170ed5b9e53a00ad10b47ced870587 | 4,749 | py | Python | opsgenie_swagger/models/list_team_routing_rules_response.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | null | null | null | opsgenie_swagger/models/list_team_routing_rules_response.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | null | null | null | opsgenie_swagger/models/list_team_routing_rules_response.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | 1 | 2020-11-07T11:27:13.000Z | 2020-11-07T11:27:13.000Z | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.base_response import BaseResponse # noqa: F401,E501
from opsgenie_swagger.models.team_routing_rule import TeamRoutingRule # noqa: F401,E501
class ListTeamRoutingRulesResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'took': 'float',
'data': 'list[TeamRoutingRule]'
}
attribute_map = {
'request_id': 'requestId',
'took': 'took',
'data': 'data'
}
def __init__(self, request_id=None, took=0.0, data=None): # noqa: E501
"""ListTeamRoutingRulesResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._took = None
self._data = None
self.discriminator = None
self.request_id = request_id
self.took = took
if data is not None:
self.data = data
@property
def request_id(self):
"""Gets the request_id of this ListTeamRoutingRulesResponse. # noqa: E501
:return: The request_id of this ListTeamRoutingRulesResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ListTeamRoutingRulesResponse.
:param request_id: The request_id of this ListTeamRoutingRulesResponse. # noqa: E501
:type: str
"""
if request_id is None:
raise ValueError("Invalid value for `request_id`, must not be `None`") # noqa: E501
self._request_id = request_id
@property
def took(self):
"""Gets the took of this ListTeamRoutingRulesResponse. # noqa: E501
:return: The took of this ListTeamRoutingRulesResponse. # noqa: E501
:rtype: float
"""
return self._took
@took.setter
def took(self, took):
"""Sets the took of this ListTeamRoutingRulesResponse.
:param took: The took of this ListTeamRoutingRulesResponse. # noqa: E501
:type: float
"""
if took is None:
raise ValueError("Invalid value for `took`, must not be `None`") # noqa: E501
self._took = took
@property
def data(self):
"""Gets the data of this ListTeamRoutingRulesResponse. # noqa: E501
:return: The data of this ListTeamRoutingRulesResponse. # noqa: E501
:rtype: list[TeamRoutingRule]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ListTeamRoutingRulesResponse.
:param data: The data of this ListTeamRoutingRulesResponse. # noqa: E501
:type: list[TeamRoutingRule]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListTeamRoutingRulesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.935294 | 96 | 0.587703 |
c8124e7b34a02f9990e22ab9aa26bb00c963f7f6 | 1,918 | py | Python | library/examples.py | summerswallow/open-rocketry-tools | 981c3f0cd16aac142e8bc9e57fc86151ce864a7c | [
"MIT"
] | null | null | null | library/examples.py | summerswallow/open-rocketry-tools | 981c3f0cd16aac142e8bc9e57fc86151ce864a7c | [
"MIT"
] | null | null | null | library/examples.py | summerswallow/open-rocketry-tools | 981c3f0cd16aac142e8bc9e57fc86151ce864a7c | [
"MIT"
] | null | null | null | from nosecone.standard_nosecones import *
from misc import utils
if __name__ == '__main__':
from bodytubes.semroc import bt20
from bodytubes.semroc import bt5
array = utils.array(4, MM2IN, [
InvertedTangentOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
EllipticalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
ConicalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125, mid_diameter=.3),
BiconicNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125, mid_diameter=.3),
ParabolicNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
HaackSeriesNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
PowerSeriesNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
BluntedConicalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
TangentOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
BluntedTangentOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
SecantOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3)])
utils.render_to_file(array, "examples/standard_nosecones.scad")
| 63.933333 | 119 | 0.622523 |
5161bee6a1215cb338dff9f0faaa1ea982c03401 | 2,849 | py | Python | InvenTree/InvenTree/version.py | TheCrazyMaffin/InvenTree | 2686f61a4ac279386a83049745339345f1ac4cf7 | [
"MIT"
] | null | null | null | InvenTree/InvenTree/version.py | TheCrazyMaffin/InvenTree | 2686f61a4ac279386a83049745339345f1ac4cf7 | [
"MIT"
] | null | null | null | InvenTree/InvenTree/version.py | TheCrazyMaffin/InvenTree | 2686f61a4ac279386a83049745339345f1ac4cf7 | [
"MIT"
] | null | null | null | """ Version information for InvenTree.
Provides information on the current InvenTree version
"""
import subprocess
import django
import re
import common.models
INVENTREE_SW_VERSION = "0.2.4 pre"
"""
Increment this API version number whenever there is a significant change to the API that any clients need to know about
v3 -> 2021-05-22:
- The updated StockItem "history tracking" now uses a different interface
v4 -> 2021-06-01
- BOM items can now accept "variant stock" to be assigned against them
- Many slight API tweaks were needed to get this to work properly!
"""
INVENTREE_API_VERSION = 4
def inventreeInstanceName():
""" Returns the InstanceName settings for the current database """
return common.models.InvenTreeSetting.get_setting("INVENTREE_INSTANCE", "")
def inventreeInstanceTitle():
""" Returns the InstanceTitle for the current database """
if common.models.InvenTreeSetting.get_setting("INVENTREE_INSTANCE_TITLE", False):
return common.models.InvenTreeSetting.get_setting("INVENTREE_INSTANCE", "")
else:
return 'InvenTree'
def inventreeVersion():
""" Returns the InvenTree version string """
return INVENTREE_SW_VERSION
def inventreeVersionTuple(version=None):
""" Return the InvenTree version string as (maj, min, sub) tuple """
if version is None:
version = INVENTREE_SW_VERSION
match = re.match(r"^.*(\d+)\.(\d+)\.(\d+).*$", str(version))
return [int(g) for g in match.groups()]
def isInvenTreeUpToDate():
"""
Test if the InvenTree instance is "up to date" with the latest version.
A background task periodically queries GitHub for latest version,
and stores it to the database as INVENTREE_LATEST_VERSION
"""
latest = common.models.InvenTreeSetting.get_setting('INVENTREE_LATEST_VERSION', None)
# No record for "latest" version - we must assume we are up to date!
if not latest:
return True
# Extract "tuple" version (Python can directly compare version tuples)
latest_version = inventreeVersionTuple(latest)
inventree_version = inventreeVersionTuple()
return inventree_version >= latest_version
def inventreeApiVersion():
return INVENTREE_API_VERSION
def inventreeDjangoVersion():
""" Return the version of Django library """
return django.get_version()
def inventreeCommitHash():
""" Returns the git commit hash for the running codebase """
try:
return str(subprocess.check_output('git rev-parse --short HEAD'.split()), 'utf-8').strip()
except:
return None
def inventreeCommitDate():
""" Returns the git commit date for the running codebase """
try:
d = str(subprocess.check_output('git show -s --format=%ci'.split()), 'utf-8').strip()
return d.split(' ')[0]
except:
return None
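# Hypothetical usage sketch (not part of the original module). Because of the
# module-level `import common.models`, this only runs inside a configured Django
# environment; the helpers exercised below are pure string/regex manipulation.
if __name__ == "__main__":
    # "0.2.4 pre" parses to [0, 2, 4]; Python compares such lists element by element,
    # which is exactly how isInvenTreeUpToDate() decides whether an update is available.
    current = inventreeVersionTuple()
    latest = inventreeVersionTuple("0.2.5")
    print(current, latest, current >= latest)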
| 27.394231 | 118 | 0.704809 |
0fbd36b7ac7bbe511e77682cbb05895909da4867 | 15,005 | py | Python | ds4se/infoxplainer/prediction/eval/traceability.py | WM-CSCI-435-F19/data-science-4-software-engineering | 3692163df710550d4ee5b399a2a184968a0f18c6 | [
"Apache-2.0"
] | 5 | 2020-12-08T00:38:24.000Z | 2021-11-16T20:00:59.000Z | ds4se/infoxplainer/prediction/eval/traceability.py | WM-CSCI-435-F19/data-science-4-software-engineering | 3692163df710550d4ee5b399a2a184968a0f18c6 | [
"Apache-2.0"
] | 110 | 2020-09-26T18:36:35.000Z | 2022-03-12T00:54:35.000Z | ds4se/infoxplainer/prediction/eval/traceability.py | WM-CSCI-435-F19/data-science-4-software-engineering | 3692163df710550d4ee5b399a2a184968a0f18c6 | [
"Apache-2.0"
] | 3 | 2020-12-09T19:23:10.000Z | 2021-02-16T12:54:16.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/4.6_infoxplainer.prediction.eval.traceability.ipynb (unless otherwise specified).
__all__ = ['SupervisedVectorEvaluation', 'ManifoldEntropy']
# Cell
from prg import prg
# Cell
import ds4se as ds
from ....mining.ir import VectorizationType
from ....mining.ir import SimilarityMetric
from ....mining.ir import EntropyMetric
from ....mining.ir import DistanceMetric
# Cell
#Description importation
from ....ds.description.eval.traceability import VectorEvaluation
# Cell
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
# Cell
import gensim
import pandas as pd
from itertools import product
from random import sample
import functools
import os
from enum import Enum, unique, auto
# Cell
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
from sklearn.metrics import auc
import math as m
import random as r
import collections
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
# Cell
from scipy.spatial import distance
from scipy.stats import pearsonr
# Cell
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
# Cell
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Cell
class SupervisedVectorEvaluation(VectorEvaluation):
def __init__(self, params):
super().__init__(params)
self.sys = params['system']
#Word2vec
similarities_w2v = self.sim_list_w2v + ['Linked?']
similarities_w2v = [str(i) for i in similarities_w2v]
self.df_filtered_w2v = self.df_w2v.copy()
self.df_filtered_w2v = self.df_filtered_w2v[similarities_w2v]
self.df_filtered_w2v = self.df_filtered_w2v[~self.df_filtered_w2v.isin([np.nan, np.inf, -np.inf]).any(1)]
#Doc2vec
similarities_d2v = self.sim_list_d2v + ['Linked?']
similarities_d2v = [str(i) for i in similarities_d2v]
self.df_filtered_d2v = self.df_d2v.copy()
self.df_filtered_d2v = self.df_filtered_d2v[similarities_d2v]
self.df_filtered_d2v = self.df_filtered_d2v[~self.df_filtered_d2v.isin([np.nan, np.inf, -np.inf]).any(1)]
def vecTypeVerification(self, vecType= VectorizationType.word2vec):
if vecType == VectorizationType.word2vec:
self.sim_list = self.sim_list_w2v
y_test = self.df_filtered_w2v['Linked?'].values
y_score = [self.df_filtered_w2v[ str(sim) ].values for sim in self.sim_list]
logging.info('Vectorization: ' + str(vecType) )
elif vecType == VectorizationType.doc2vec:
self.sim_list = self.sim_list_d2v
y_test = self.df_filtered_d2v['Linked?'].values
y_score = [self.df_filtered_d2v[ str(sim) ].values for sim in self.sim_list]
logging.info('Vectorization: ' + str(vecType) )
return y_test,y_score
def vecTypeVerificationSim(self, vecType= VectorizationType.word2vec,sim=SimilarityMetric.SCM_sim):
if vecType == VectorizationType.word2vec:
self.sim_list = self.sim_list_w2v
y_test = self.df_filtered_w2v['Linked?'].values
y_score = self.df_filtered_w2v[ str(sim) ].values
logging.info('Vectorization: ' + str(vecType) + " " + str(sim))
elif vecType == VectorizationType.doc2vec:
self.sim_list = self.sim_list_d2v
y_test = self.df_filtered_d2v['Linked?'].values
y_score = self.df_filtered_d2v[ str(sim) ].values
logging.info('Vectorization: ' + str(vecType) + " " + str(sim))
return y_test,y_score
def Compute_precision_recall_gain(self, vecType = VectorizationType.word2vec, sim=SimilarityMetric.SCM_sim):
'''One might choose PRG if there is little interest in identifying false negatives '''
y_test,y_score = self.vecTypeVerificationSim(vecType=vecType, sim=sim)
fig = go.Figure(layout_yaxis_range=[-0.05,1.02],layout_xaxis_range=[-0.05,1.02])
prg_curve = prg.create_prg_curve(y_test, y_score)
indices = np.arange(np.argmax(prg_curve['in_unit_square']) - 1,
len(prg_curve['in_unit_square']))
pg = prg_curve['precision_gain']
rg = prg_curve['recall_gain']
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],
line = dict(color="cyan", width=2,dash="solid")))
indices = np.logical_or(prg_curve['is_crossing'],
prg_curve['in_unit_square'])
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],
line = dict(color="blue", width=2,dash="solid")))
indices = np.logical_and(prg_curve['in_unit_square'],
True - prg_curve['is_crossing'])
fig.add_trace(go.Scatter(x=rg[indices], y=pg[indices],mode='markers'))
valid_points = np.logical_and( ~ np.isnan(rg), ~ np.isnan(pg))
upper_hull = prg.convex_hull(zip(rg[valid_points],pg[valid_points]))
rg_hull, pg_hull = zip(*upper_hull)
fig.add_trace(go.Scatter(x=rg_hull, y=pg_hull, mode = "lines",
line = dict(color="red", width=2,dash="dash")))
auprg = prg.calc_auprg(prg_curve)
logging.info('auprg: %.3f' % auprg)
logging.info("compute_precision_recall_gain Complete: "+str(sim))
fig.update_layout(
title=self.sys + "-[" + str(sim) + "]",
height = 600,
width = 600,
xaxis_title='Recall Gain',
xaxis = dict(
tickmode = 'linear',
tick0 = 0,
dtick = 0.25),
yaxis_title='Precision Gain',
yaxis = dict(
tickmode = 'linear',
tick0 = 0,
dtick = 0.25)
)
fig.update_yaxes(
scaleanchor = "x",
scaleratio = 1,
)
return fig
def Compute_avg_precision(self, vecType = VectorizationType.word2vec):
        '''Generates a combined precision-recall and ROC figure for each similarity metric'''
y_test,y_score = self.vecTypeVerification(vecType=vecType)
linestyles = ['solid','dash','dashdot','dotted']
color = 'red'
# calculate the no skill line as the proportion of the positive class
no_skill = len(y_test[y_test==1]) / len(y_test)
fig = go.Figure()
fig.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], name='No Skill [{0:0.2f}]'.format(no_skill), mode = "lines",
line = dict(color='red', width=.5, dash='dash')))
for count,sim in enumerate(self.sim_list):
precision, recall, _ = precision_recall_curve(y_test, y_score[count]) #compute precision-recall curve
average_precision = average_precision_score(y_test, y_score[count])
auc_score = auc(recall, precision)
logging.info('Average precision-recall score: {0:0.2f}'.format(average_precision))
logging.info('Precision-Recall AUC: %.2f' % auc_score)
fig.add_trace(go.Scatter(x=recall, y=precision, name=str(sim.name)+' [auc:{0:0.2f}]'.format(auc_score),
line = dict(color=color, width=1, dash=linestyles[count])))
##AUC
color = 'blue'
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='No Skill', mode = "lines",
line = dict(color='blue', width=.5, dash='dot')))
for count,sim in enumerate(self.sim_list):
fpr, tpr, _ = roc_curve(y_test, y_score[count]) #compute roc curve
roc_auc = roc_auc_score(y_test, y_score[count])
logging.info('ROC AUC %.2f' % roc_auc)
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=str(sim.name)+' [auc:{0:0.2f}]'.format(roc_auc),
line = dict(color=color, width=1, dash=linestyles[count])))
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='recall [fpr]',
yaxis_title='tpr')
return fig
def Compute_avg_precision_same_plot(self, vecType = VectorizationType.word2vec):
        '''Generates the precision-recall curve for each similarity metric'''
linestyles = ['solid','dash','dashdot','dotted']
fig = go.Figure()
color = 'red'
y_test,y_score = self.vecTypeVerification(vecType=vecType)
# calculate the no skill line as the proportion of the positive class
no_skill = len(y_test[y_test==1]) / len(y_test)
fig.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], name='No Skill [{0:0.2f}]'.format(no_skill), mode = "lines",
line = dict(color='red', width=.5, dash='dash'))) #reference curve
for count,sim in enumerate(self.sim_list):
precision, recall, _ = precision_recall_curve(y_test, y_score[count]) #compute precision-recall curve
average_precision = average_precision_score(y_test, y_score[count])
auc_score = auc(recall, precision)
logging.info('Average precision-recall score: {0:0.2f}'.format(average_precision))
logging.info('Precision-Recall AUC: %.2f' % auc_score)
fig.add_trace(go.Scatter(x=recall, y=precision, name=str(sim.name)+' [auc:{0:0.2f}]'.format(auc_score),
line = dict(color=color, width=1, dash=linestyles[count]))) #plot model curve
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='Recall',
yaxis_title='Precision')
return fig
def Compute_roc_curve(self, vecType = VectorizationType.word2vec):
linestyles = ['solid','dash','dashdot','dotted']
fig = go.Figure()
color = 'blue'
y_test,y_score = self.vecTypeVerification(vecType = vecType)
fig.add_trace(go.Scatter(x=[0, 1], y=[0, 1], name='No Skill', mode = "lines",
line = dict(color='blue', width=.5, dash='dot'))) #reference curve
for count,sim in enumerate(self.sim_list):
fpr, tpr, _ = roc_curve(y_test, y_score[count]) #compute roc curve
roc_auc = roc_auc_score(y_test, y_score[count])
logging.info('ROC AUC %.2f' % roc_auc)
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=str(sim.name)+' [auc:{0:0.2f}]'.format(roc_auc),
line = dict(color=color, width=1, dash=linestyles[count]))) #plot model curve #plot model curve
fig.update_layout(
title=self.sys + "-[" + str(vecType) + "]",
xaxis_title='False Positive Rate',
yaxis_title='True Positive Rate')
return fig
    def CofusionMatrix(self, vecType = VectorizationType.word2vec, sim = SimilarityMetric.SCM_sim, threshold = 0.8):
        ##TODO This implementation is incomplete and not fully verified yet
        y_test, y_score = self.vecTypeVerificationSim(vecType=vecType, sim=sim)
        y_score_threshold = [0 if elem <= threshold else 1 for elem in y_score] #binarize similarities at the given threshold
        #TODO a variation-of-threshold analysis
        tn, fp, fn, tp = confusion_matrix(y_test, y_score_threshold).ravel()
return tn, fp, fn, tp
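# Illustration (an assumption, not part of the original notebook export): CofusionMatrix
# amounts to binarizing similarity scores at a threshold and handing them to sklearn's
# confusion_matrix, e.g.:
#
#   y_test  = [1, 0, 1, 0]
#   y_score = [0.9, 0.2, 0.4, 0.85]
#   y_pred  = [0 if s <= 0.8 else 1 for s in y_score]          # -> [1, 0, 0, 1]
#   tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()  # -> (1, 1, 1, 1)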
# Cell
class ManifoldEntropy(VectorEvaluation):
def __init__(self, params):
super().__init__(params)
self.sharedEntropy_filtered = self.sharedInfo.copy()
self.sharedEntropy_filtered.dropna(inplace=True)
self.sys = params['system']
def minimum_shared_entropy(self,dist = SimilarityMetric.WMD_sim, extropy=False):
'''Minimum Shared Plot'''
ent = EntropyMetric.MSI_I
color = 'dark blue'
if extropy:
ent = EntropyMetric.MSI_X
color = 'red'
columns = [str(i) for i in [ent, dist ]]
corr = self.compute_spearman_corr(self.sharedEntropy_filtered, columns)
logging.info('Correlation {%.2f}' % corr)
fig = px.scatter(self.sharedEntropy_filtered,
x = columns[0], y = columns[1], color_discrete_sequence=[color])
fig.update_layout(
title = self.sys +': ['+ dist.name + '-' + ent.name + '] Correlation {%.2f}' % corr
)
return fig
def manifold_entropy_plot(self, manifold = EntropyMetric.MI, dist = SimilarityMetric.WMD_sim):
'''Manifold Entropy'''
columns = [str(i) for i in [manifold, dist]]
corr = self.compute_spearman_corr(self.manifoldEntropy, columns)
logging.info('Correlation {%.2f}' % corr)
fig = px.scatter(self.manifoldEntropy,
x = columns[0], y = columns[1], color_continuous_scale=["dark blue"])
fig.update_layout(
title = self.sys +': ['+ dist.name + '-' + manifold.name + '] Correlation {%.2f}' % corr
)
return fig
def composable_entropy_plot(self,
manifold_x = EntropyMetric.MI,
manifold_y = EntropyMetric.Loss,
dist = SimilarityMetric.WMD_sim
):
columns = [str(i) for i in [manifold_x, manifold_y, dist]]
if isinstance(dist, str):
title = self.sys +': Information-Semantic Interactions '+ dist
else:
title = self.sys +': Information-Semantic Interactions '+ dist.name
fig = px.scatter(self.manifoldEntropy,x = columns[0], y = columns[1], color = columns[2],
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout(
title = title
)
return fig
def composable_shared_plot(self,
manifold_x = EntropyMetric.MSI_I,
manifold_y = EntropyMetric.Loss,
dist = SimilarityMetric.WMD_sim,
drop_na = True
):
columns = [str(i) for i in [manifold_x, manifold_y, dist]]
if isinstance(dist, str):
title = self.sys +': Information-Semantic Interactions '+ dist
else:
title = self.sys +': Information-Semantic Interactions '+ dist.name
df = self.df_w2v
num_na = df.isna().sum().sum()
if drop_na:
df = df.dropna(inplace=False)
fig = px.scatter(df,x = columns[0], y = columns[1], color = columns[2],
color_continuous_scale=px.colors.sequential.Viridis)
fig.update_layout(
title = title
)
return fig, num_na
def compute_spearman_corr(self, filter_metrics_01, columns):
df_correlation = filter_metrics_01.copy()
correlation = df_correlation[columns].corr(method='spearman')
#correlation = df_correlation.corr(method='spearman')
return correlation[columns[0]].values[1] | 41.109589 | 129 | 0.615395 |
b2fb4dc8d8761a0136c832e5088647d1b22de151 | 6,906 | py | Python | cubert/cubert_tokenizer.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 1 | 2021-01-08T03:21:19.000Z | 2021-01-08T03:21:19.000Z | cubert/cubert_tokenizer.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | null | null | null | cubert/cubert_tokenizer.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utilities for source code tokenization."""
import abc
import tokenize
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Sequence
from typing import Text
from typing import Union
import dataclasses
from cubert import unified_tokenizer
# After all splitting, the longest a token is of the following length.
MAX_OUTPUT_TOKEN_LENGTH = 15
class CuBertTokenizer(abc.ABC):
"""A tokenizer that implements a language-agnostic tokenization.
The tokenizer implements a language-agnostic tokenization. This is available
as `tokenize_and_abstract()`.
"""
def __init__(self, max_output_token_length = MAX_OUTPUT_TOKEN_LENGTH,
reserved = ()):
self.types_to_skip = []
self.reserved = reserved
self.mappings: Dict[str, str]
self.update_mappings({
# By default, replace \n and \r. This is meant primarily for literals.
'\n':
unified_tokenizer.quote_special('NLCHAR'),
'\r':
unified_tokenizer.quote_special('CR'),
unified_tokenizer.SENTINEL:
unified_tokenizer.quote_special(unified_tokenizer.SENTINEL_ESCAPE),
})
self.max_output_token_length = max_output_token_length
@abc.abstractmethod
def tokenize_and_abstract(
self,
source_code):
"""Produces a language-agnostic tokenization of the input code.
Args:
source_code: Source code stored in a string.
Returns:
A list of pairs of a token (string) and a token kind in the given source
code. It always includes an end of sequence token. That is, an empty
input always returns a list of size 1.
Raises:
ValueError: if `source_code` cannot be tokenized.
"""
@abc.abstractmethod
def untokenize_abstract(self, whole_tokens):
"""Applies language-specific rules to an abstract untokenized list.
Args:
whole_tokens: Abstract tokens, reconstituted and unsanitized by
`untokenize` before passed to this language-specific logic.
Returns:
A string representing the untokenized text.
"""
def update_types_to_skip(
self, types_to_skip):
"""Replaces the set of token types that are ignored.
Each tokenizer may provide different semantics with respect to this list,
and may ignore it altogether.
Args:
types_to_skip: List of types (from the constants in the `token` module) or
`unified_tokenizer.TokenKind`. Note that some of those constants are
actually defined in the `tokenize` module.
"""
self.types_to_skip = types_to_skip
def replace_reserved_keywords(self, reserved):
"""Replaces the reserved keywords with the supplied list of strings.
Each tokenizer may provide different semantics with respect to the list
of reserved keywords, or ignore them altogether.
Args:
reserved: List of strings.
"""
self.reserved = reserved # Replace the old one entirely.
def update_mappings(self, mappings):
"""Replaces the character mappings with the supplied dictionary.
The intent for character mappings is to enable tokenizers that support them
to sanitize dangerous characters, such as newline and carriage return,
with a nicer symbol.
Each tokenizer may provide different semantics with respect to the
mappings, or ignore them altogether.
Args:
mappings: Dictionary of original to sanitized strings. Keys are expected
to have length 1.
Raises:
ValueError: if a key has length different from 1.
"""
unified_tokenizer.check_mappings(mappings)
self.mappings = mappings
def get_mappings(self):
return self.mappings
def condition_full_tokens(
self, agnostic
):
"""Applies reserved keywords and character sanitization."""
filtered: Iterable[unified_tokenizer.AbstractToken] = (
a for a in agnostic if a.kind not in self.types_to_skip)
# Now turn all reserved words, regardless of kind, into keywords.
with_reserved: Sequence[unified_tokenizer.AbstractToken] = tuple(
dataclasses.replace(a, kind=unified_tokenizer.TokenKind.KEYWORD)
if a.spelling in self.reserved else a
for a in filtered)
return with_reserved
def subtokenize_full_tokens(
self, agnostic
):
"""Performs heuristic splitting of full tokens."""
subtoken_lists = unified_tokenizer.subtokenize_agnostic_tokens_in_place(
agnostic_tokens=agnostic,
max_output_token_length=self.max_output_token_length,
sanitization_mapping=self.mappings,
sentinel=unified_tokenizer.SENTINEL)
return subtoken_lists
def tokenize(self, source_code):
"""Tokenizes via `tokenize_and_abstract`."""
try:
agnostic = self.tokenize_and_abstract(source_code)
except Exception as e:
raise ValueError('While trying to do language-specific tokenization for '
'the string:\n\n\n%r\n\n\n%s\n\n\n'
'we received error %r.' % (source_code, source_code, e))
conditioned = self.condition_full_tokens(agnostic)
multi_tokens = self.subtokenize_full_tokens(conditioned)
subtokens = unified_tokenizer.flatten_subtoken_lists(multi_tokens)
return subtokens
def untokenize(self, token_list):
"""Untokenizes via `untokenize_abstract`."""
# Untokenize agnostic.
if (not token_list or token_list[-1] != unified_tokenizer.quote_special(
unified_tokenizer.TokenKind.EOS.name)):
raise ValueError('Token list %r should end with the EOS token %r.' %
(token_list,
unified_tokenizer.quote_special(
unified_tokenizer.TokenKind.EOS.name)))
whole_tokens = unified_tokenizer.reconstitute_full_unsanitary_tokens(
token_list,
sanitization_mapping=self.mappings,
sentinel=unified_tokenizer.SENTINEL)
return self.untokenize_abstract(whole_tokens)
def token_from_token_type(token_type):
"""Turns a token type into a reserved token string."""
# We use the tok_name dict from tokenize, not token. The former has
# NL and COMMENT and such, whereas the latter doesn't.
return unified_tokenizer.quote_special(tokenize.tok_name[token_type])
| 34.358209 | 80 | 0.716623 |
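A small self-contained sketch of the two steps the tokenizer above delegates to unified_tokenizer — sanitizing dangerous characters through a mapping and splitting long tokens down to a maximum output length. This is written from scratch for illustration; the names and behavior below are assumptions, not the repository's actual helper API.

# Simplified stand-ins for unified_tokenizer's sanitization and subtokenization.
MAX_OUTPUT_TOKEN_LENGTH = 15
MAPPINGS = {'\n': '__NLCHAR__', '\r': '__CR__'}  # stand-ins for quote_special('NLCHAR'/'CR')

def sanitize(token):
    """Replace characters that must not appear verbatim in an output token."""
    return ''.join(MAPPINGS.get(ch, ch) for ch in token)

def subtokenize(token, max_len=MAX_OUTPUT_TOKEN_LENGTH):
    """Split an over-long (sanitized) token into pieces of at most max_len characters."""
    token = sanitize(token)
    return [token[i:i + max_len] for i in range(0, len(token), max_len)] or ['']

print(subtokenize('a_rather_long_identifier_name'))  # ['a_rather_long_i', 'dentifier_name']
print(subtokenize('line1\nline2'))                   # newline replaced before splitting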
c52e6bc68c89d4d33801a1b35e68adb1ba9ddfb1 | 3,051 | py | Python | 03_pyPRMS_prototype/Update HRUs in Parameter File.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | ["MIT"] | null | null | null | 03_pyPRMS_prototype/Update HRUs in Parameter File.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | ["MIT"] | null | null | null | 03_pyPRMS_prototype/Update HRUs in Parameter File.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | ["MIT"] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:prms_py3]
# language: python
# name: conda-env-prms_py3-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import glob
import os
import pandas as pd
import sys
from pyPRMS.ParameterFile import ParameterFile
from pyPRMS.ControlFile import ControlFile
from pyPRMS.ValidParams import ValidParams
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/tmp/nvwsc'
# Original parameter file
src_param_file = f'{workdir}/Desch.params6'
# Altered single-HRU parameter file
hru_param_file = f'{workdir}/results/ALL.SCE_HRU141'
# %% [markdown]
# ### Load original parameter file
# %%
pfile_orig = ParameterFile(src_param_file)
# %% [markdown]
# ### Load altered, single-HRU parameter file
# %%
pfile_hru = ParameterFile(hru_param_file)
# %% [markdown]
# ### Get set of parameters that were altered
# <P>These parameters were overridden within the HRU parameter file (e.g. declared twice)</P>
# %%
print(pfile_hru.updated_params)
# %% [markdown]
# ### Check internal consistency of a parameter
# <P>Checks if number of values for the parameter matches the dimensions declared for the parameter</P>
# %%
pfile_orig.parameters['snarea_curve'].check()
# %% [markdown]
# ### Check all parameters
# <P>Checks internal consistency of all parameters plus any additional constraints or dependencies on other parameters. For example, the number of snarea_curve entries is checked against the number of unique hru_deplcrv values.</P>
# %%
pfile_orig.parameters.check()
# %% [markdown]
# ### Open each single-HRU parameter file and update modified parameters from the original parameter file
# %%
filelist = glob.glob(f'{workdir}/results/ALL.SCE_HRU*')
print(f'Processing {len(filelist)} HRUs')
for cfile in sorted(filelist):
chru = int(os.path.basename(cfile).split('HRU')[1])
sys.stdout.write(f'\rUpdating HRU {chru}: {cfile} ')
pfile_hru = ParameterFile(cfile)
for cparam in pfile_hru.updated_params:
# Arrays are 0-based in python
        if cparam == 'snarea_curve':
            # Special handling for snarea_curve: 11 values per depletion curve
            pfile_orig.parameters[cparam].data.reshape((-1, 11))[chru-1, :] = pfile_hru.parameters[cparam].data
        else:
            pfile_orig.parameters[cparam].data[chru-1] = pfile_hru.parameters[cparam].data[0]
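# %% [markdown]
# <P>A minimal self-contained sketch (not part of the original notebook) of the snarea_curve update above: each depletion curve holds 11 values, so the flat parameter array is viewed as an (n_curves, 11) matrix and the row for the calibrated curve is overwritten in place through the reshape view.</P>
# %%
import numpy as np
snarea_flat = np.zeros(3 * 11)           # stand-in for the full snarea_curve parameter data
new_curve = np.linspace(0.0, 1.0, 11)    # stand-in for the single-HRU calibrated curve
chru = 2                                 # 1-based index, as in the loop above
snarea_flat.reshape((-1, 11))[chru - 1, :] = new_curve
snarea_flat.reshape((-1, 11))[chru - 1]  # row 1 (0-based) now holds the new curve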
# %% [markdown]
# ### Write the parameters to a new parameter file
# %%
pfile_orig.write_parameter_file('calibrated.param', header=['Generated by ParameterFile','version XX'])
# %%
cc = pfile_orig.parameters['hru_deplcrv'].as_dataframe
cc.head()
# %%
# NOTE: bb is expected to hold the snarea_curve values as a DataFrame (one row per
# 11-value depletion curve), built in an earlier cell; cc holds the per-HRU curve indices.
if bb.index.size != cc.size:
    print('More snarea_curve values specified than required by hru_deplcrv entries')
# %%
bb.rename(columns={k: k+1 for k in bb.columns},
index={k: k+1 for k in bb.index}, inplace=True)
# %%
bb.head()
# %%
| 25.638655 | 231 | 0.69846 |
73a51b728fb9070348b2861d5ca1bfdadfa179de | 4,651 | py | Python | pithy/format.py | gwk/glossy | 6976ca4fd1efc09d9cd670b1fe37817c05b4b529 | ["CC0-1.0"] | 7 | 2019-05-04T00:51:38.000Z | 2021-12-10T15:36:31.000Z | pithy/format.py | gwk/glossy | 6976ca4fd1efc09d9cd670b1fe37817c05b4b529 | ["CC0-1.0"] | null | null | null | pithy/format.py | gwk/glossy | 6976ca4fd1efc09d9cd670b1fe37817c05b4b529 | ["CC0-1.0"] | 1 | 2016-07-30T22:38:08.000Z | 2016-07-30T22:38:08.000Z |
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
'Parse Python format strings and generate corresponding regular expressions.'
import re
from typing import Any, Iterable, Match, Pattern, Tuple
from .string import line_col_1
class FormatError(Exception): pass
fmt_re = re.compile(r'''(?x)
(?P<formatter>\{
(?P<name> [^{}!:]* )
(?: ! (?P<conv> [ars] ) )?
(?: : (?P<spec> (?: [^{}] | \{ [^{}]* \} )* ) )?
#^ for the spec, we allow a single level of nested formatters.
\})
| \{\{
| \}\}
| [^{}]+
''')
# Translated from standard docs "6.1.3.1. Format Specification Mini-Language".
fmt_spec_re = re.compile(r'''(?x)
(?: (?P<fill> . )? (?P<align> [<>=^]) )?
(?P<sign> [-+\ ] )?
(?P<alt> \# )?
(?P<zero> 0 ) ?
(?P<width> \d+ | \{ [^{}]* \} )? # note: nested format.
(?P<grouping> [_,] )?
(?: \. (?P<precision> \d+ | \{ [^{}]* \} ) )? # note: nested format.
(?P<type> [bcdeEfFgGnosxX%] )?
''')
spec_type_patterns = {
'd': r'\d'
}
spec_types = {
'd': int
}
def has_formatter(string: str) -> bool:
'Returns True if `string` contains a format pattern.'
for match in gen_format_matches(string):
if match.group('formatter'):
return True
return False
def count_formatters(fmt: str) -> int:
'Count the number of formatters in the string.'
count = 0
for match in gen_format_matches(fmt):
if match.group('formatter'):
count += 1
return count
def parse_formatters(fmt: str) -> Iterable[Tuple[str, str, str, type]]:
for match in gen_format_matches(fmt):
formatter = match.group('formatter')
if formatter is not None:
value_type: type = str
name, conv, spec = match.group('name', 'conv', 'spec')
assert isinstance(name, str), name
if spec:
spec_match = fmt_spec_re.fullmatch(spec)
if not spec_match: raise _exc(fmt, match.start(), f'invalid format spec: {spec!r}')
fill, align, sign, alt, zero, width, grouping, precision, type_ = spec_match.group(
'fill', 'align', 'sign', 'alt', 'zero', 'width', 'grouping', 'precision', 'type')
if type_:
try: value_type = spec_types[type_]
except KeyError as e: raise _exc(fmt, match.start(), f'spec type {type_!r} not implemented') from e
yield (name, conv or '', spec or '', value_type)
def format_partial(fmt: str, *args: str, **kwargs: Any) -> str:
args_it = iter(args)
def format_frag(match: Match[str]) -> str:
formatter = match.group('formatter')
if formatter:
name = match.group('name')
if name:
try: return formatter.format(**kwargs)
except KeyError: return formatter
else:
try: return formatter.format(next(args_it), **kwargs)
except (StopIteration, KeyError): return formatter
return match.group()
return ''.join(format_frag(m) for m in gen_format_matches(fmt))
def format_to_re(fmt: str, allow_empty=False, greedy=False) -> Pattern[str]:
'Translate a format string into a regular expression pattern.'
quantifier = ('*' if allow_empty else '+') + ('' if greedy else '?')
def pattern_from(match: Match[str]) -> str:
def exc(msg: str) -> FormatError: return _exc(fmt, match.start(), msg)
if match.group('formatter'):
pat = '.' + quantifier # Default pattern.
spec = match.group('spec')
if spec:
spec_match = fmt_spec_re.fullmatch(spec)
if not spec_match: raise exc(f'invalid format spec: {spec!r}')
fill, align, sign, alt, zero, width, grouping, precision, type_ = spec_match.group(
'fill', 'align', 'sign', 'alt', 'zero', 'width', 'grouping', 'precision', 'type')
if type_:
try: pat = spec_type_patterns[type_] + '+'
except KeyError as e: raise exc(f'spec type {type_!r} not implemented') from e
name = match.group('name')
if name: return f'(?P<{name}>{pat})'
else: return f'({pat})'
text = match.group()
if text == '{{': return r'\{'
if text == '}}': return r'\}'
return re.escape(text)
return re.compile(''.join(pattern_from(m) for m in gen_format_matches(fmt)))
def gen_format_matches(fmt: str) -> Iterable[Match]:
'Generate a sequence of match objects completely covering the format string.'
pos = 0
def exc() -> FormatError: return _exc(fmt, pos, f'invalid format character: {fmt[pos]!r}')
for match in fmt_re.finditer(fmt):
if match.start() != pos: raise exc()
pos = match.end()
yield match
if pos != len(fmt): raise exc()
def _exc(fmt: str, pos: int, msg: str) -> FormatError:
line, col = line_col_1(fmt, pos)
return FormatError(f'<str>:{line}:{col}: {msg}')
| 31.425676 | 109 | 0.616857 |
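A usage sketch for the module above (not part of the source file); it assumes the module is importable as pithy.format, matching its repository path.

from pithy.format import format_to_re, parse_formatters

# Translate a format string into a compiled regular expression and match against it.
pattern = format_to_re('{name}: {count:d}')
match = pattern.fullmatch('widgets: 42')
assert match and match.group('name') == 'widgets' and match.group('count') == '42'

# parse_formatters yields (name, conversion, spec, value_type) per formatter.
assert [f[0] for f in parse_formatters('{name}: {count:d}')] == ['name', 'count']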
34292fdad1a5a46019e3268272ab9124bcc2cc9e | 10,295 | py | Python | datalad_hirni/tests/test_dicom2spec.py | loj/datalad-hirni | e03c1cbcbeb5e62cdc7c4537d6d5d9b93d5b139e | ["MIT"] | null | null | null | datalad_hirni/tests/test_dicom2spec.py | loj/datalad-hirni | e03c1cbcbeb5e62cdc7c4537d6d5d9b93d5b139e | ["MIT"] | null | null | null | datalad_hirni/tests/test_dicom2spec.py | loj/datalad-hirni | e03c1cbcbeb5e62cdc7c4537d6d5d9b93d5b139e | ["MIT"] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# -*- coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test dicom2spec command; DICOM metadata based specification creation"""
import os.path as op
from datalad.api import (
Dataset,
install
)
from datalad.tests.utils import (
assert_result_count,
assert_in,
ok_clean_git,
with_tempfile,
assert_equal
)
from datalad.utils import get_tempfile_kwargs
from datalad_neuroimaging.tests.utils import (
get_dicom_dataset,
)
from datalad.support.json_py import load_stream
from datalad_hirni.support.spec_helpers import (
get_specval,
has_specval
)
# TODO:
#
# - invalid calls
# - pass properties
# - test default rules
# - custom vs. configured specfile
# - test results
# - spec file in git? => should stay in git
class RawDataset(object):
def __init__(self):
self._dspath = None
def get_raw_dataset(self):
# Note: This is lazy to avoid building on import time, since import is part of nose's discovery and executed
# before the dependencies. This leads to datalad's ui backend not yet being correctly set, which in turn
        # lets the cloning hang within progressbar generation.
if not self._dspath:
import tempfile
kwargs = get_tempfile_kwargs()
path = tempfile.mkdtemp(**kwargs)
f_dicoms = get_dicom_dataset('functional')
s_dicoms = get_dicom_dataset('structural')
ds = Dataset.create(path, cfg_proc=['hirni'])
ds.install(source=f_dicoms, path=op.join('func_acq', 'dicoms'))
ds.install(source=s_dicoms, path=op.join('struct_acq', 'dicoms'))
            # Note: Recursive, since aggregation wasn't performed in the installed datasets
ds.meta_aggregate([op.join('func_acq', 'dicoms'), op.join('struct_acq', 'dicoms')],
into='top',
recursive=True)
# TODO: Figure how to add it to things to be removed after tests ran
self._dspath = ds.path
return self._dspath
test_raw_ds = RawDataset()
@with_tempfile
def test_default_rules(path):
# ## SETUP a raw ds
ds = install(source=test_raw_ds.get_raw_dataset(), path=path)
# ## END SETUP
# create specs for dicomseries w/ default rules:
# TODO: spec path should prob. relate to `path` via (derived) acquisition!
ds.hirni_dicom2spec(path=op.join("func_acq", "dicoms"), spec=op.join("func_acq", "studyspec.json"))
ds.hirni_dicom2spec(path=op.join("struct_acq", "dicoms"), spec=op.join("struct_acq", "studyspec.json"))
func_spec = [s for s in load_stream(op.join(path, "func_acq", "studyspec.json"))]
for snippet in func_spec:
# type
assert_in("type", snippet.keys())
assert_in(snippet["type"], ["dicomseries", "dicomseries:all"])
# no comment in default spec
assert not has_specval(snippet, 'comment') or not get_specval(snippet, 'comment')
# description
assert has_specval(snippet, 'description')
assert_equal(get_specval(snippet, 'description'), "func_task-oneback_run-1")
# subject
assert has_specval(snippet, 'subject')
assert_equal(get_specval(snippet, 'subject'), '02')
# modality
assert has_specval(snippet, 'bids-modality')
assert_equal(get_specval(snippet, 'bids-modality'), 'bold')
# task
assert has_specval(snippet, "bids-task")
assert_equal(get_specval(snippet, "bids-task"), "oneback")
# run
assert has_specval(snippet, "bids-run")
assert_equal(get_specval(snippet, "bids-run"), "01")
# id
assert has_specval(snippet, "id")
assert_equal(get_specval(snippet, "id"), 401)
# should have 1 snippet of type dicomseries + 1 of type dicomseries:all
assert_equal(len(func_spec), 2)
assert_in("dicomseries", [s['type'] for s in func_spec])
assert_in("dicomseries:all", [s['type'] for s in func_spec])
struct_spec = [s for s in load_stream(op.join(path, "struct_acq", "studyspec.json"))]
for snippet in struct_spec:
# type
assert "type" in snippet.keys()
assert snippet["type"] in ["dicomseries", "dicomseries:all"]
# no comment in default spec
assert not has_specval(snippet, 'comment') or not get_specval(snippet, 'comment')
# description
assert has_specval(snippet, 'description')
assert_equal(get_specval(snippet, 'description'), "anat-T1w")
# subject
assert has_specval(snippet, 'subject')
assert_equal(get_specval(snippet, 'subject'), '02')
# modality
assert has_specval(snippet, 'bids-modality')
assert_equal(get_specval(snippet, 'bids-modality'), 't1w')
# run
assert has_specval(snippet, "bids-run")
assert_equal(get_specval(snippet, "bids-run"), "1")
# should have 1 snippet of type dicomseries + 1 of type dicomseries:all
assert_equal(len(struct_spec), 2)
assert_in("dicomseries", [s['type'] for s in struct_spec])
assert_in("dicomseries:all", [s['type'] for s in struct_spec])
@with_tempfile
def test_custom_rules(path):
# ## SETUP a raw ds
ds = install(source=test_raw_ds.get_raw_dataset(), path=path)
# ## END SETUP
# 1. simply default rules
ds.hirni_dicom2spec(path=op.join("struct_acq", "dicoms"), spec=op.join("struct_acq", "studyspec.json"))
struct_spec = [s for s in load_stream(op.join(path, "struct_acq", "studyspec.json"))]
for spec_snippet in struct_spec:
# no comment in default spec
assert not has_specval(spec_snippet, 'comment') or not get_specval(spec_snippet, 'comment')
# subject
assert has_specval(spec_snippet, 'subject')
assert_equal(get_specval(spec_snippet, 'subject'), '02')
# modality
assert has_specval(spec_snippet, 'bids-modality')
assert_equal(get_specval(spec_snippet, 'bids-modality'), 't1w')
# should have 1 snippet of type dicomseries + 1 of type dicomseries:all
assert_equal(len(struct_spec), 2)
assert_in("dicomseries", [s['type'] for s in struct_spec])
assert_in("dicomseries:all", [s['type'] for s in struct_spec])
# set config to use custom rules
import datalad_hirni
ds.config.add("datalad.hirni.dicom2spec.rules",
op.join(op.dirname(datalad_hirni.__file__),
'resources',
'rules',
'test_rules.py'),
)
# 2. do again with configured rules (rules 1)
import os
os.unlink(op.join(path, 'struct_acq', 'studyspec.json'))
ds.hirni_dicom2spec(path=op.join("struct_acq", "dicoms"), spec=op.join("struct_acq", "studyspec.json"))
struct_spec = [s for s in load_stream(op.join(path, "struct_acq", "studyspec.json"))]
# assertions wrt spec
for spec_snippet in struct_spec:
# now there's a comment in spec
assert has_specval(spec_snippet, 'comment')
assert_equal(get_specval(spec_snippet, 'comment'), "Rules1: These rules are for unit testing only")
# should have 1 snippet of type dicomseries + 1 of type dicomseries:all
assert_equal(len(struct_spec), 2)
assert_in("dicomseries", [s['type'] for s in struct_spec])
assert_in("dicomseries:all", [s['type'] for s in struct_spec])
# 3. once again with two configured rule sets (rules 1 and 2)
ds.config.add("datalad.hirni.dicom2spec.rules",
op.join(op.dirname(datalad_hirni.__file__),
'resources',
'rules',
'test_rules2.py'),
)
rule_files = ds.config.get("datalad.hirni.dicom2spec.rules")
# ensure assumption about order (dicom2spec relies on it):
assert_equal(rule_files,
(op.join(op.dirname(datalad_hirni.__file__),
'resources',
'rules',
'test_rules.py'),
op.join(op.dirname(datalad_hirni.__file__),
'resources',
'rules',
'test_rules2.py')
)
)
os.unlink(op.join(path, 'struct_acq', 'studyspec.json'))
ds.hirni_dicom2spec(path=op.join("struct_acq", "dicoms"), spec=op.join("struct_acq", "studyspec.json"))
struct_spec = [s for s in load_stream(op.join(path, "struct_acq", "studyspec.json"))]
# assertions wrt spec
for spec_snippet in struct_spec:
# Rule2 should have overwritten Rule1's comment:
assert has_specval(spec_snippet, 'comment')
assert_equal(get_specval(spec_snippet, 'comment'), "Rules2: These rules are for unit testing only")
# should have 1 snippet of type dicomseries + 1 of type dicomseries:all
assert_equal(len(struct_spec), 2)
assert_in("dicomseries", [s['type'] for s in struct_spec])
assert_in("dicomseries:all", [s['type'] for s in struct_spec])
@with_tempfile
def test_dicom2spec(path):
# ### SETUP ###
dicoms = get_dicom_dataset('structural')
ds = Dataset.create(path, cfg_proc=['hirni'])
ds.install(source=dicoms, path='acq100')
    # Note: Recursive, since aggregation wasn't performed in the installed datasets
# TODO: Use get_raw_sd from above instead of this setup
ds.meta_aggregate('acq100', into='top', recursive=True)
# ### END SETUP ###
# TODO: should it be specfile or acq/specfile? => At least doc needed,
# if not change
res = ds.hirni_dicom2spec(path='acq100', spec='spec_structural.json')
# check for actual location of spec_structural!
# => studyds root!
assert_result_count(res, 2)
assert_result_count(res, 1, path=op.join(ds.path, 'spec_structural.json'))
assert_result_count(res, 1, path=op.join(ds.path, '.gitattributes'))
ok_clean_git(ds.path)
| 37.98893 | 116 | 0.631277 |
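A minimal sketch (not from the test suite) of inspecting a generated study specification. It assumes only what the assertions above rely on: the spec file is a JSON-lines stream with one snippet per line and a "type" field per snippet; richer field access goes through datalad_hirni's get_specval()/has_specval() helpers in practice.

import json

def iter_snippets(spec_path):
    """Yield one dict per non-empty line of a JSON-lines study specification."""
    with open(spec_path) as f:
        for line in f:
            if line.strip():
                yield json.loads(line)

# e.g. count the 'dicomseries' snippets of a spec produced by hirni_dicom2spec:
# n = sum(1 for s in iter_snippets('struct_acq/studyspec.json') if s['type'] == 'dicomseries')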
6a6360571b2b811a1144843cfad3ed70d96f35ca | 1,680 | py | Python | setup.py | szabolcsdombi/vmath | 7deb6dfdf1bde304cca7016e2be51ad1947b4a78 | ["MIT"] | null | null | null | setup.py | szabolcsdombi/vmath | 7deb6dfdf1bde304cca7016e2be51ad1947b4a78 | ["MIT"] | null | null | null | setup.py | szabolcsdombi/vmath | 7deb6dfdf1bde304cca7016e2be51ad1947b4a78 | ["MIT"] | null | null | null |
import sys
from setuptools import Extension, setup
extra_compile_args = []
if sys.platform.startswith('linux'):
extra_compile_args = ['-fpermissive', '-Wno-write-strings', '-Wno-narrowing']
if sys.platform.startswith('darwin'):
extra_compile_args = ['-std=c++11', '-Wno-writable-strings', '-Wno-c++11-narrowing']
ext = Extension(
name='vmath',
sources=['./vmath.cpp'],
define_macros=[('PY_SSIZE_T_CLEAN', None)],
extra_compile_args=extra_compile_args,
)
with open('README.md') as readme:
long_description = readme.read()
setup(
name='vmath',
version='0.3.0',
ext_modules=[ext],
data_files=[('.', ['vmath.pyi'])],
license='MIT',
python_requires='>=3.6',
platforms=['any'],
description='Compact Python OpenGL rendering library',
long_description=long_description,
long_description_content_type='text/markdown',
author='Szabolcs Dombi',
author_email='[email protected]',
url='https://github.com/szabolcsdombi/vmath/',
project_urls={
'Documentation': 'https://vmath.readthedocs.io/',
'Source': 'https://github.com/szabolcsdombi/vmath/',
'Bug Tracker': 'https://github.com/szabolcsdombi/vmath/issues/',
},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Scientific/Engineering :: Visualization',
],
keywords=[
'vector',
'matrix',
'quaternion',
],
)
| 29.473684 | 88 | 0.633929 |
f38ed511088b59d32fe3216697b876b715be8004 | 41,492 | py | Python | tensorforce/agents/tensorforce.py | vishalbelsare/tensorforce | 085a62bd37e0fdfd05691db29edeb2e1714ffbda | ["Apache-2.0"] | 1,132 | 2019-01-03T14:41:04.000Z | 2022-03-29T07:44:50.000Z | tensorforce/agents/tensorforce.py | vishalbelsare/tensorforce | 085a62bd37e0fdfd05691db29edeb2e1714ffbda | ["Apache-2.0"] | 353 | 2019-01-02T19:46:10.000Z | 2022-03-30T10:38:45.000Z | tensorforce/agents/tensorforce.py | vishalbelsare/tensorforce | 085a62bd37e0fdfd05691db29edeb2e1714ffbda | ["Apache-2.0"] | 219 | 2019-01-03T16:55:39.000Z | 2022-03-14T00:27:05.000Z |
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import os
from random import shuffle
import numpy as np
from tensorforce import TensorforceError, util
from tensorforce.agents import Agent
from tensorforce.core import ArrayDict
from tensorforce.core.models import TensorforceModel
class TensorforceAgent(Agent):
"""
Tensorforce agent (specification key: `tensorforce`).
Highly configurable agent and basis for a broad class of deep reinforcement learning agents,
which act according to a policy parametrized by a neural network, leverage a memory module for
periodic updates based on batches of experience, and optionally employ a baseline/critic/target
policy for improved reward estimation.
Args:
states (specification): States specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create()`), arbitrarily nested dictionary of state
descriptions (usually taken from `Environment.states()`) with the following attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – state data type
(<span style="color:#00C000"><b>default</b></span>: "float").</li>
<li><b>shape</b> (<i>int | iter[int]</i>) – state shape
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete state values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum state value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
actions (specification): Actions specification
(<span style="color:#C00000"><b>required</b></span>, better implicitly specified via
`environment` argument for `Agent.create()`), arbitrarily nested dictionary of
action descriptions (usually taken from `Environment.actions()`) with the following
attributes:
<ul>
<li><b>type</b> (<i>"bool" | "int" | "float"</i>) – action data type
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>shape</b> (<i>int > 0 | iter[int > 0]</i>) – action shape
(<span style="color:#00C000"><b>default</b></span>: scalar).</li>
<li><b>num_values</b> (<i>int > 0</i>) – number of discrete action values
(<span style="color:#C00000"><b>required</b></span> for type "int").</li>
<li><b>min_value/max_value</b> (<i>float</i>) – minimum/maximum action value
(<span style="color:#00C000"><b>optional</b></span> for type "float").</li>
</ul>
        max_episode_timesteps (int > 0): Upper bound for number of timesteps per episode
(<span style="color:#00C000"><b>default</b></span>: not given, better implicitly
specified via `environment` argument for `Agent.create()`).
policy (specification): Policy configuration, see [networks](../modules/networks.html) and
[policies documentation](../modules/policies.html)
(<span style="color:#00C000"><b>default</b></span>: action distributions or value
functions parametrized by an automatically configured network).
memory (int | specification): Replay memory capacity, or memory configuration, see the
[memories documentation](../modules/memories.html)
(<span style="color:#00C000"><b>default</b></span>: minimum capacity recent memory).
update (int | specification): Model update configuration with the following attributes
(<span style="color:#C00000"><b>required</b>,
<span style="color:#00C000"><b>default</b></span>: timesteps batch size</span>):
<ul>
<li><b>unit</b> (<i>"timesteps" | "episodes"</i>) – unit for update attributes
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>batch_size</b>
(<i><a href="../modules/parameters.html">parameter</a>, int > 0</i>) –
size of update batch in number of units
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>frequency</b>
(<i>"never" | <a href="../modules/parameters.html">parameter</a>, int > 0 | 0.0 < float <= 1.0</i>) –
frequency of updates, relative to batch_size if float
(<span style="color:#00C000"><b>default</b></span>: batch_size).</li>
<li><b>start</b>
(<i><a href="../modules/parameters.html">parameter</a>, int >= batch_size</i>) –
number of units before first update
(<span style="color:#00C000"><b>default</b></span>: none).</li>
</ul>
optimizer (specification): Optimizer configuration, see the
[optimizers documentation](../modules/optimizers.html)
(<span style="color:#00C000"><b>default</b></span>: Adam optimizer).
objective (specification): Optimization objective configuration, see the
[objectives documentation](../modules/objectives.html)
(<span style="color:#C00000"><b>required</b></span>).
reward_estimation (specification): Reward estimation configuration with the following
attributes (<span style="color:#C00000"><b>required</b></span>):
<ul>
<li><b>horizon</b>
(<i>"episode" | <a href="../modules/parameters.html">parameter</a>, int >= 1</i>)
– Horizon of discounted-sum return estimation
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>discount</b>
(<i><a href="../modules/parameters.html">parameter</a>, 0.0 <= float <= 1.0</i>) –
Discount factor of future rewards for discounted-sum return estimation
(<span style="color:#00C000"><b>default</b></span>: 1.0).</li>
<li><b>predict_horizon_values</b> (<i>false | "early" | "late"</i>) – Whether to
include a baseline prediction of the horizon value as part of the return estimation, and
if so, whether to compute the horizon value prediction "early" when experiences are
stored to memory, or "late" when batches of experience are retrieved for the update
(<span style="color:#00C000"><b>default</b></span>: "late" if baseline_policy or
baseline_objective are specified, else false).</li>
<li><b>estimate_advantage</b> (<i>False | "early" | "late"</i>) – Whether to use
an estimate of the advantage (return minus baseline value prediction) instead of the
return as learning signal, and whether to do so late after the baseline update
(default) or early before the baseline update
(<span style="color:#00C000"><b>default</b></span>: false, unless baseline_policy is
specified but baseline_objective/optimizer are not).</li>
<li><b>predict_action_values</b> (<i>bool</i>) – Whether to predict state-action-
instead of state-values as horizon values and for advantage estimation
(<span style="color:#00C000"><b>default</b></span>: false).</li>
<li><b>reward_processing</b> (<i>specification)</i>) – Reward preprocessing as
layer or list of layers, see the
[preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no reward processing).</li>
<li><b>return_processing</b> (<i>specification</i>) – Return processing as layer
or list of layers, see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no return processing).</li>
<li><b>advantage_processing</b> (<i>specification</i>) – Advantage processing as
layer or list of layers, see the [preprocessing documentation](../modules/preprocessing.html)
(<span style="color:#00C000"><b>default</b></span>: no advantage processing).</li>
<li><b>predict_terminal_values</b> (<i>bool</i>) – Whether to predict the value
of terminal states, usually not required since max_episode_timesteps terminals are
handled separately
(<span style="color:#00C000"><b>default</b></span>: false).</li>
</ul>
baseline (specification): Baseline configuration, policy will be used as baseline if none,
see [networks](../modules/networks.html) and potentially
[policies documentation](../modules/policies.html)
(<span style="color:#00C000"><b>default</b></span>: none).
baseline_optimizer (specification | <a href="../modules/parameters.html">parameter</a>, float > 0.0):
Baseline optimizer configuration, see the
[optimizers documentation](../modules/optimizers.html),
main optimizer will be used for baseline if none, a float implies none and specifies a
custom weight for the baseline loss
(<span style="color:#00C000"><b>default</b></span>: none).
baseline_objective (specification): Baseline optimization objective configuration, see the
[objectives documentation](../modules/objectives.html),
required if baseline optimizer is specified, main objective will be used for baseline if
baseline objective and optimizer are not specified
(<span style="color:#00C000"><b>default</b></span>: none).
l2_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
L2 regularization loss weight
(<span style="color:#00C000"><b>default</b></span>: no L2 regularization).
entropy_regularization (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Entropy regularization loss weight, to discourage the policy distribution from being
"too certain"
(<span style="color:#00C000"><b>default</b></span>: no entropy regularization).
state_preprocessing (dict[specification]): State preprocessing as layer or list of layers,
see the [preprocessing documentation](../modules/preprocessing.html),
specified per state-type or -name
(<span style="color:#00C000"><b>default</b></span>: linear normalization of bounded
float states to [-2.0, 2.0]).
exploration (<a href="../modules/parameters.html">parameter</a> | dict[<a href="../modules/parameters.html">parameter</a>], float >= 0.0):
Exploration, defined as the probability for uniformly random output in case of `bool`
and `int` actions, and the standard deviation of Gaussian noise added to every output in
case of `float` actions, specified globally or per action-type or -name
(<span style="color:#00C000"><b>default</b></span>: no exploration).
variable_noise (<a href="../modules/parameters.html">parameter</a>, float >= 0.0):
Add Gaussian noise with given standard deviation to all trainable variables, as
alternative exploration mechanism
(<span style="color:#00C000"><b>default</b></span>: no variable noise).
parallel_interactions (int > 0): Maximum number of parallel interactions to support,
for instance, to enable multiple parallel episodes, environments or agents within an
environment
(<span style="color:#00C000"><b>default</b></span>: 1).
config (specification): Additional configuration options:
<ul>
<li><b>name</b> (<i>string</i>) – Agent name, used e.g. for TensorFlow scopes and
saver default filename
(<span style="color:#00C000"><b>default</b></span>: "agent").
<li><b>device</b> (<i>string</i>) – Device name
(<span style="color:#00C000"><b>default</b></span>: CPU). Different from (un)supervised
deep learning, RL does not always benefit from running on a GPU, depending on
environment and agent configuration. In particular for RL-typical environments with
low-dimensional state spaces (i.e., no images), one usually gets better performance by
running on CPU only. Consequently, Tensorforce is configured to run on CPU by default,
which can be changed, for instance, by setting this value to 'GPU' instead.
<li><b>seed</b> (<i>int</i>) – Random seed to set for Python, NumPy (both set
globally!) and TensorFlow, environment seed may have to be set separately for fully
deterministic execution, generally not recommended since results in a fully
deterministic setting are less meaningful/representative
(<span style="color:#00C000"><b>default</b></span>: none).</li>
<li><b>buffer_observe</b> (<i>false | "episode" | int > 0</i>) – Number of
timesteps within an episode to buffer before calling the internal observe function, to
reduce calls to TensorFlow for improved performance
(<span style="color:#00C000"><b>default</b></span>: configuration-specific maximum
number which can be buffered without affecting performance).</li>
<li><b>enable_int_action_masking</b> (<i>bool</i>) – Whether int action options
can be masked via an optional "[ACTION-NAME]_mask" state input
(<span style="color:#00C000"><b>default</b></span>: true).</li>
<li><b>create_tf_assertions</b> (<i>bool</i>) – Whether to create internal
TensorFlow assertion operations
(<span style="color:#00C000"><b>default</b></span>: true).</li>
<li><b>eager_mode</b> (<i>bool</i>) – Whether to run functions eagerly instead of
running as a traced graph function, can be helpful for debugging
(<span style="color:#00C000"><b>default</b></span>: false).</li>
<li><b>tf_log_level</b> (<i>int >= 0</i>) – TensorFlow log level, additional C++
logging messages can be enabled by setting os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"/"2"
before importing Tensorforce/TensorFlow
(<span style="color:#00C000"><b>default</b></span>: 40, only error and critical).</li>
</ul>
saver (path | specification): TensorFlow checkpoints directory, or checkpoint manager
configuration with the following attributes, for periodic implicit saving as alternative
to explicit saving via agent.save()
(<span style="color:#00C000"><b>default</b></span>: no saver):
<ul>
<li><b>directory</b> (<i>path</i>) – checkpoint directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>filename</b> (<i>string</i>) – checkpoint filename
(<span style="color:#00C000"><b>default</b></span>: agent name).</li>
<li><b>frequency</b> (<i>int > 0</i>) – how frequently to save a checkpoint
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>unit</b> (<i>"timesteps" | "episodes" | "updates"</i>) – frequency unit
(<span style="color:#00C000"><b>default</b></span>: updates).</li>
<li><b>max_checkpoints</b> (<i>int > 0</i>) – maximum number of checkpoints to
keep (<span style="color:#00C000"><b>default</b></span>: 10).</li>
<li><b>max_hour_frequency</b> (<i>int > 0</i>) – ignoring max-checkpoints,
definitely keep a checkpoint in given hour frequency
(<span style="color:#00C000"><b>default</b></span>: none).</li>
</ul>
summarizer (path | specification): TensorBoard summaries directory, or summarizer
configuration with the following attributes
(<span style="color:#00C000"><b>default</b></span>: no summarizer):
<ul>
<li><b>directory</b> (<i>path</i>) – summarizer directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>filename</b> (<i>path</i>) – summarizer filename, max_summaries does not
apply if name specified
(<span style="color:#00C000"><b>default</b></span>: "summary-%Y%m%d-%H%M%S").</li>
<li><b>max_summaries</b> (<i>int > 0</i>) – maximum number of (generically-named)
summaries to keep
(<span style="color:#00C000"><b>default</b></span>: 7, number of different colors in
Tensorboard).</li>
<li><b>flush</b> (<i>int > 0</i>) – how frequently in seconds to flush the
summary writer (<span style="color:#00C000"><b>default</b></span>: 10).</li>
<li><b>summaries</b> (<i>"all" | iter[string]</i>) – which summaries to record,
"all" implies all numerical summaries, so all summaries except "graph"
(<span style="color:#00C000"><b>default</b></span>: "all"):</li>
<li>"action-value": value of each action (timestep-based)</li>
<li>"distribution": distribution parameters like probabilities or mean and stddev
(timestep-based)</li>
<li>"entropy": entropy of (per-action) policy distribution(s) (timestep-based)</li>
<li>"graph": computation graph</li>
<li>"kl-divergence": KL-divergence of previous and updated (per-action) policy
distribution(s) (update-based)</li>
<li>"loss": policy and baseline loss plus loss components (update-based)</li>
<li>"parameters": parameter values (according to parameter unit)</li>
<li>"reward": reward per timestep, episode length and reward, plus intermediate
reward/return/advantage estimates and processed values
(timestep/episode/update-based)</li>
<li>"update-norm": global norm of update (update-based)</li>
<li>"updates": mean and variance of update tensors per variable (update-based)</li>
<li>"variables": mean of trainable variables tensors (update-based)</li>
</ul>
tracking ("all" | iter[string]): Which tensors to track, available values are a subset of
the values of summarizer[summaries] above
(<span style="color:#00C000"><b>default</b></span>: no tracking).
The current value of tracked tensors can be retrieved via tracked_tensors() at any time,
however, note that tensor values change at different timescales (timesteps, episodes,
updates).
recorder (path | specification): Traces recordings directory, or recorder configuration with
the following attributes (see
[record-and-pretrain script](https://github.com/tensorforce/tensorforce/blob/master/examples/record_and_pretrain.py)
for example application)
(<span style="color:#00C000"><b>default</b></span>: no recorder):
<ul>
<li><b>directory</b> (<i>path</i>) – recorder directory
(<span style="color:#C00000"><b>required</b></span>).</li>
<li><b>frequency</b> (<i>int > 0</i>) – how frequently in episodes to record
traces (<span style="color:#00C000"><b>default</b></span>: every episode).</li>
<li><b>start</b> (<i>int >= 0</i>) – how many episodes to skip before starting to
record traces (<span style="color:#00C000"><b>default</b></span>: 0).</li>
<li><b>max-traces</b> (<i>int > 0</i>) – maximum number of traces to keep
(<span style="color:#00C000"><b>default</b></span>: all).</li>
"""
def __init__(
# Required
self, states, actions, update, optimizer, objective, reward_estimation,
# Environment
max_episode_timesteps=None,
# Agent
policy='auto', memory=None,
# Baseline
baseline=None, baseline_optimizer=None, baseline_objective=None,
# Regularization
l2_regularization=0.0, entropy_regularization=0.0,
# Preprocessing
state_preprocessing='linear_normalization',
# Exploration
exploration=0.0, variable_noise=0.0,
# Parallel interactions
parallel_interactions=1,
# Config, saver, summarizer, tracking, recorder
config=None, saver=None, summarizer=None, tracking=None, recorder=None,
# Deprecated
**kwargs
):
if 'estimate_actions' in reward_estimation:
raise TensorforceError.deprecated(
name='Agent', argument='reward_estimation[estimate_actions]',
replacement='reward_estimation[predict_action_values]'
)
if 'estimate_terminal' in reward_estimation:
raise TensorforceError.deprecated(
name='Agent', argument='reward_estimation[estimate_terminal]',
replacement='reward_estimation[predict_terminal_values]'
)
if summarizer is not None and 'labels' in summarizer:
raise TensorforceError.deprecated(
name='Agent', argument='summarizer[labels]', replacement='summarizer[summaries]'
)
if 'baseline_policy' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='baseline_policy', replacement='baseline'
)
if 'reward_preprocessing' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='reward_preprocessing',
replacement='reward_estimation[reward_processing]'
)
if 'name' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='name', replacement='config[name]'
)
if 'buffer_observe' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='buffer_observe', replacement='config[buffer_observe]'
)
if 'device' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='device', replacement='config[device]'
)
if 'seed' in kwargs:
raise TensorforceError.deprecated(
name='Agent', argument='seed', replacement='config[seed]'
)
if len(kwargs) > 0:
raise TensorforceError.invalid(name='Agent', argument=', '.join(kwargs))
if not hasattr(self, 'spec'):
self.spec = OrderedDict(
agent='tensorforce',
# Environment
states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
# Agent
policy=policy, memory=memory, update=update, optimizer=optimizer,
objective=objective, reward_estimation=reward_estimation,
# Baseline
baseline=baseline, baseline_optimizer=baseline_optimizer,
baseline_objective=baseline_objective,
# Regularization
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
# Preprocessing
state_preprocessing=state_preprocessing,
# Exploration
exploration=exploration, variable_noise=variable_noise,
# Parallel interactions
parallel_interactions=parallel_interactions,
# Config, saver, summarizer, recorder
config=config, saver=saver, summarizer=summarizer, tracking=tracking,
recorder=recorder
)
if memory is None:
memory = dict(type='recent')
if isinstance(update, int):
update = dict(unit='timesteps', batch_size=update)
if config is None:
config = dict()
else:
config = dict(config)
# TODO: should this change if summarizer is specified?
if parallel_interactions > 1:
if 'buffer_observe' not in config:
if max_episode_timesteps is None:
raise TensorforceError.required(
name='Agent', argument='max_episode_timesteps',
condition='parallel_interactions > 1'
)
config['buffer_observe'] = 'episode'
# elif config['buffer_observe'] < max_episode_timesteps:
# raise TensorforceError.value(
# name='Agent', argument='config[buffer_observe]',
# hint='< max_episode_timesteps', condition='parallel_interactions > 1'
# )
elif update['unit'] == 'timesteps':
update_frequency = update.get('frequency', update['batch_size'])
if 'buffer_observe' not in config:
if isinstance(update_frequency, int):
config['buffer_observe'] = update_frequency
else:
config['buffer_observe'] = 1
elif isinstance(update_frequency, int) and (
config['buffer_observe'] == 'episode' or config['buffer_observe'] > update_frequency
):
raise TensorforceError.value(
name='Agent', argument='config[buffer_observe]', value=config['buffer_observe'],
hint='> update[frequency]', condition='update[unit] = "timesteps"'
)
elif update['unit'] == 'episodes':
if 'buffer_observe' not in config:
config['buffer_observe'] = 'episode'
# reward_estimation = dict(reward_estimation)
# if reward_estimation['horizon'] == 'episode':
# if max_episode_timesteps is None:
# raise TensorforceError.required(
# name='Agent', argument='max_episode_timesteps',
# condition='reward_estimation[horizon] = "episode"'
# )
# reward_estimation['horizon'] = max_episode_timesteps
super().__init__(
states=states, actions=actions, max_episode_timesteps=max_episode_timesteps,
parallel_interactions=parallel_interactions, config=config, recorder=recorder
)
self.model = TensorforceModel(
states=self.states_spec, actions=self.actions_spec,
max_episode_timesteps=self.max_episode_timesteps,
policy=policy, memory=memory, update=update, optimizer=optimizer, objective=objective,
reward_estimation=reward_estimation,
baseline=baseline, baseline_optimizer=baseline_optimizer,
baseline_objective=baseline_objective,
l2_regularization=l2_regularization, entropy_regularization=entropy_regularization,
state_preprocessing=state_preprocessing,
exploration=exploration, variable_noise=variable_noise,
parallel_interactions=self.parallel_interactions,
config=self.config, saver=saver, summarizer=summarizer, tracking=tracking
)
def experience(self, states, actions, terminal, reward, internals=None):
"""
Feed experience traces.
See the [act-experience-update script](https://github.com/tensorforce/tensorforce/blob/master/examples/act_experience_update_interface.py)
for an example application as part of the act-experience-update interface, which is an
alternative to the act-observe interaction pattern.
Args:
states (dict[array[state]]): Dictionary containing arrays of states
(<span style="color:#C00000"><b>required</b></span>).
actions (dict[array[action]]): Dictionary containing arrays of actions
(<span style="color:#C00000"><b>required</b></span>).
terminal (array[bool]): Array of terminals
(<span style="color:#C00000"><b>required</b></span>).
reward (array[float]): Array of rewards
(<span style="color:#C00000"><b>required</b></span>).
internals (dict[state]): Dictionary containing arrays of internal agent states
(<span style="color:#C00000"><b>required</b></span> if agent has internal states).
"""
if not all(len(buffer) == 0 for buffer in self.terminal_buffer):
raise TensorforceError(message="Calling agent.experience is not possible mid-episode.")
# Process states input and infer batching structure
states, batched, num_instances, is_iter_of_dicts = self._process_states_input(
states=states, function_name='Agent.experience'
)
if is_iter_of_dicts:
# Input structure iter[dict[input]]
# Internals
if internals is None:
internals = ArrayDict(self.initial_internals())
internals = internals.fmap(function=(lambda x: np.repeat(np.expand_dims(x, axis=0), repeats=num_instances, axis=0)))
elif not isinstance(internals, (tuple, list)):
raise TensorforceError.type(
name='Agent.experience', argument='internals', dtype=type(internals),
hint='is not tuple/list'
)
else:
internals = [ArrayDict(internal) for internal in internals]
internals = internals[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=internals[1:]
)
# Actions
if isinstance(actions, np.ndarray):
actions = ArrayDict(singleton=actions)
elif not isinstance(actions, (tuple, list)):
raise TensorforceError.type(
name='Agent.experience', argument='actions', dtype=type(actions),
hint='is not tuple/list'
)
elif not isinstance(actions[0], dict):
actions = ArrayDict(singleton=np.asarray(actions))
elif all(list(action) == ['action'] for action in actions):
actions = [ArrayDict(singleton=action['action']) for action in actions]
actions = actions[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=actions[1:]
)
else:
actions = [ArrayDict(action) for action in actions]
actions = actions[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=actions[1:]
)
else:
# Input structure dict[iter[input]]
# Internals
if internals is None:
internals = ArrayDict(self.initial_internals())
internals = internals.fmap(function=(lambda x: np.tile(np.expand_dims(x, axis=0), reps=(num_instances,))))
elif not isinstance(internals, dict):
raise TensorforceError.type(
name='Agent.experience', argument='internals', dtype=type(internals),
hint='is not dict'
)
else:
internals = ArrayDict(internals)
# Actions
if isinstance(actions, np.ndarray):
actions = ArrayDict(singleton=actions)
elif not isinstance(actions, dict):
raise TensorforceError.type(
name='Agent.experience', argument='actions', dtype=type(actions),
hint='is not dict'
)
elif list(actions) == ['action']:
actions = ArrayDict(singleton=actions['action'])
else:
actions = ArrayDict(actions)
# Expand inputs if not batched
if not batched:
internals = internals.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
actions = actions.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
terminal = np.asarray([terminal])
reward = np.asarray([reward])
else:
terminal = np.asarray(terminal)
reward = np.asarray(reward)
# Check number of inputs
for name, internal in internals.items():
if internal.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(internals[{}])'.format(name),
value=internal.shape[0], hint='!= len(states)'
)
for name, action in actions.items():
if action.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(actions[{}])'.format(name),
value=action.shape[0], hint='!= len(states)'
)
if terminal.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(terminal)'.format(name),
value=terminal.shape[0], hint='!= len(states)'
)
if reward.shape[0] != num_instances:
raise TensorforceError.value(
name='Agent.experience', argument='len(reward)'.format(name),
value=reward.shape[0], hint='!= len(states)'
)
def function(name, spec):
auxiliary = ArrayDict()
if self.config.enable_int_action_masking and spec.type == 'int' and \
spec.num_values is not None:
if name is None:
name = 'action'
# Mask, either part of states or default all true
auxiliary['mask'] = states.pop(name + '_mask', np.ones(
shape=(num_instances,) + spec.shape + (spec.num_values,), dtype=spec.np_type()
))
return auxiliary
auxiliaries = self.actions_spec.fmap(function=function, cls=ArrayDict, with_names=True)
if self.states_spec.is_singleton() and not states.is_singleton():
states[None] = states.pop('state')
# Convert terminal to int if necessary
if terminal.dtype is util.np_dtype(dtype='bool'):
zeros = np.zeros_like(terminal, dtype=util.np_dtype(dtype='int'))
ones = np.ones_like(terminal, dtype=util.np_dtype(dtype='int'))
terminal = np.where(terminal, ones, zeros)
if terminal[-1] == 0:
raise TensorforceError(message="Agent.experience() requires full episodes as input.")
# Batch experiences split into episodes and at most size buffer_observe
last = 0
for index in range(1, len(terminal) + 1):
if terminal[index - 1] == 0:
continue
function = (lambda x: x[last: index])
states_batch = states.fmap(function=function)
internals_batch = internals.fmap(function=function)
auxiliaries_batch = auxiliaries.fmap(function=function)
actions_batch = actions.fmap(function=function)
terminal_batch = function(terminal)
reward_batch = function(reward)
last = index
# Inputs to tensors
states_batch = self.states_spec.to_tensor(
value=states_batch, batched=True, name='Agent.experience states'
)
internals_batch = self.internals_spec.to_tensor(
value=internals_batch, batched=True, recover_empty=True,
name='Agent.experience internals'
)
auxiliaries_batch = self.auxiliaries_spec.to_tensor(
value=auxiliaries_batch, batched=True, name='Agent.experience auxiliaries'
)
actions_batch = self.actions_spec.to_tensor(
value=actions_batch, batched=True, name='Agent.experience actions'
)
terminal_batch = self.terminal_spec.to_tensor(
value=terminal_batch, batched=True, name='Agent.experience terminal'
)
reward_batch = self.reward_spec.to_tensor(
value=reward_batch, batched=True, name='Agent.experience reward'
)
# Model.experience()
timesteps, episodes = self.model.experience(
states=states_batch, internals=internals_batch, auxiliaries=auxiliaries_batch,
actions=actions_batch, terminal=terminal_batch, reward=reward_batch
)
self.timesteps = timesteps.numpy().item()
self.episodes = episodes.numpy().item()
if self.model.saver is not None:
self.model.save()
def update(self, query=None, **kwargs):
"""
Perform an update.
See the [act-experience-update script](https://github.com/tensorforce/tensorforce/blob/master/examples/act_experience_update_interface.py)
for an example application as part of the act-experience-update interface, which is an
alternative to the act-observe interaction pattern.
"""
updates = self.model.update()
self.updates = updates.numpy().item()
if self.model.saver is not None:
self.model.save()
def pretrain(self, directory, num_iterations, num_traces=1, num_updates=1, extension='.npz'):
"""
Simple pretraining approach as a combination of `experience()` and `update`, akin to
behavioral cloning, using experience traces obtained e.g. via recording agent interactions
([see documentation](https://tensorforce.readthedocs.io/en/latest/basics/features.html#record-pretrain)).
For the given number of iterations, load the given number of trace files (which each contain
recorder[frequency] episodes), feed the experience to the agent's internal memory, and
subsequently trigger the given number of updates (which will use the experience in the
internal memory, fed in this or potentially previous iterations).
See the [record-and-pretrain script](https://github.com/tensorforce/tensorforce/blob/master/examples/record_and_pretrain.py)
for an example application.
Args:
directory (path): Directory with experience traces, e.g. obtained via recorder; episode
length has to be consistent with agent configuration
(<span style="color:#C00000"><b>required</b></span>).
num_iterations (int > 0): Number of iterations consisting of loading new traces and
performing multiple updates
(<span style="color:#C00000"><b>required</b></span>).
num_traces (int > 0): Number of traces to load per iteration; has to at least satisfy
the update batch size
(<span style="color:#00C000"><b>default</b></span>: 1).
num_updates (int > 0): Number of updates per iteration
(<span style="color:#00C000"><b>default</b></span>: 1).
extension (str): Traces file extension to filter the given directory for
(<span style="color:#00C000"><b>default</b></span>: ".npz").
"""
if not os.path.isdir(directory):
raise TensorforceError.value(
name='agent.pretrain', argument='directory', value=directory
)
files = sorted(
os.path.join(directory, f) for f in os.listdir(directory)
if os.path.isfile(os.path.join(directory, f)) and os.path.splitext(f)[1] == extension
)
indices = list(range(len(files)))
for _ in range(num_iterations):
shuffle(indices)
if num_traces is None:
selection = indices
else:
selection = indices[:num_traces]
batch = None
for index in selection:
trace = ArrayDict(np.load(files[index]))
if batch is None:
batch = trace
else:
batch = batch.fmap(
function=(lambda x, y: np.concatenate([x, y], axis=0)), zip_values=(trace,)
)
for name, value in batch.pop('auxiliaries', dict()).items():
assert name.endswith('/mask')
batch['states'][name[:-5] + '_mask'] = value
self.experience(**batch.to_kwargs())
for _ in range(num_updates):
self.update()
# TODO: self.obliviate()
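        # Illustrative usage of the interface implemented above (the agent
        # instance, trace directory and counts are assumptions, not values
        # taken from this file):
        #     agent.pretrain(directory='traces', num_iterations=30,
        #                    num_traces=5, num_updates=10)
        # This loads recorded .npz trace files, feeds them to the internal
        # memory via experience(), then triggers update() the given number
        # of times.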
| 56.146143 | 146 | 0.603659 |
d2960bb91ab2d1be05009ffa1650de8c0dd97a56 | 591 | py | Python | ENV/bin/createfontdatachunk.py | jupitercl/meatme-oscar-dev | 40583cbb3c6762640a403956e41bffac0bb2ad48 | [
"BSD-3-Clause"
] | null | null | null | ENV/bin/createfontdatachunk.py | jupitercl/meatme-oscar-dev | 40583cbb3c6762640a403956e41bffac0bb2ad48 | [
"BSD-3-Clause"
] | null | null | null | ENV/bin/createfontdatachunk.py | jupitercl/meatme-oscar-dev | 40583cbb3c6762640a403956e41bffac0bb2ad48 | [
"BSD-3-Clause"
] | null | null | null | #!/home/ubuntu/workspace/meatme-oscar/ENV/bin/python
from __future__ import print_function
import base64
import os
import sys
if __name__ == "__main__":
# create font data chunk for embedding
font = "Tests/images/courB08"
print(" f._load_pilfont_data(")
print(" # %s" % os.path.basename(font))
print(" BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pil", "rb"), sys.stdout)
print("''')), Image.open(BytesIO(base64.decodestring(b'''")
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
print("'''))))")
# End of file
| 31.105263 | 63 | 0.637902 |
457fb74f1245bc4877b66445585b4d3b536d6395 | 388 | py | Python | LeetCode/NumberOfGoodPairs.py | SelvorWhim/competitive | b9daaf21920d6f7669dc0c525e903949f4e33b62 | [
"Unlicense"
] | null | null | null | LeetCode/NumberOfGoodPairs.py | SelvorWhim/competitive | b9daaf21920d6f7669dc0c525e903949f4e33b62 | [
"Unlicense"
] | null | null | null | LeetCode/NumberOfGoodPairs.py | SelvorWhim/competitive | b9daaf21920d6f7669dc0c525e903949f4e33b62 | [
"Unlicense"
] | null | null | null | # don't need to show the pairs, so the order doesn't matter
# just need to find how many times each number appears and count the pairs
from collections import Counter
from typing import List  # needed for the List[int] annotation below
class Solution:
def numIdenticalPairs(self, nums: List[int]) -> int:
counts = Counter(nums)
return sum(count * (count - 1) // 2 for count in counts.values()) # n*(n-1)/2 = number of pairs in n items
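# Illustrative check (an assumed harness, not part of the original solution):
# [1, 2, 3, 1, 1, 3] -> Counter {1: 3, 2: 1, 3: 2} -> 3 + 0 + 1 = 4 good pairs.
if __name__ == "__main__":
    assert Solution().numIdenticalPairs([1, 2, 3, 1, 1, 3]) == 4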
| 38.8 | 114 | 0.688144 |
34edd7696ad72e3b7d649f5b255116229335dac7 | 118 | py | Python | duckspush/templates/datasources.py | novapost/duckspush | baa9d609a650040c12a721a83d9f45369af90694 | [
"BSD-3-Clause"
] | null | null | null | duckspush/templates/datasources.py | novapost/duckspush | baa9d609a650040c12a721a83d9f45369af90694 | [
"BSD-3-Clause"
] | null | null | null | duckspush/templates/datasources.py | novapost/duckspush | baa9d609a650040c12a721a83d9f45369af90694 | [
"BSD-3-Clause"
] | null | null | null | #import random
## Collector example function
# def collector_test():
# return dict(value=random.randint(1,1000))
| 19.666667 | 47 | 0.728814 |
77efb314c48149c1e0216c8e444dc767dba7adf8 | 2,206 | py | Python | tests/conftest.py | riddhi150390/splink | eae96a757877cf197608be21b437356732faee6e | [
"MIT"
] | 176 | 2020-03-16T15:19:39.000Z | 2022-03-30T06:38:29.000Z | tests/conftest.py | riddhi150390/splink | eae96a757877cf197608be21b437356732faee6e | [
"MIT"
] | 194 | 2020-03-01T21:32:26.000Z | 2022-03-30T14:58:38.000Z | tests/conftest.py | riddhi150390/splink | eae96a757877cf197608be21b437356732faee6e | [
"MIT"
] | 25 | 2020-03-07T00:09:22.000Z | 2022-03-11T16:28:06.000Z | import pytest
import logging
logger = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def spark():
try:
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import types
conf = SparkConf()
conf.set("spark.jars.ivy", "/home/jovyan/.ivy2/")
conf.set("spark.driver.extraClassPath", "jars/scala-udf-similarity-0.0.7.jar")
conf.set("spark.jars", "jars/scala-udf-similarity-0.0.7.jar")
conf.set("spark.driver.memory", "4g")
conf.set("spark.sql.shuffle.partitions", "12")
sc = SparkContext.getOrCreate(conf=conf)
spark = SparkSession(sc)
udfs = [
("jaro_winkler_sim", "JaroWinklerSimilarity", types.DoubleType()),
("jaccard_sim", "JaccardSimilarity", types.DoubleType()),
("cosine_distance", "CosineDistance", types.DoubleType()),
("Dmetaphone", "DoubleMetaphone", types.StringType()),
("QgramTokeniser", "QgramTokeniser", types.StringType()),
("Q3gramTokeniser", "Q3gramTokeniser", types.StringType()),
("Q4gramTokeniser", "Q4gramTokeniser", types.StringType()),
("Q5gramTokeniser", "Q5gramTokeniser", types.StringType()),
("DmetaphoneAlt", "DoubleMetaphoneAlt", types.StringType()),
]
for a, b, c in udfs:
spark.udf.registerJavaFunction(a, "uk.gov.moj.dash.linkage." + b, c)
rt = types.ArrayType(
types.StructType(
[
types.StructField("_1", types.StringType()),
types.StructField("_2", types.StringType()),
]
)
)
spark.udf.registerJavaFunction(
name="DualArrayExplode",
javaClassName="uk.gov.moj.dash.linkage.DualArrayExplode",
returnType=rt,
)
SPARK_EXISTS = True
except:
SPARK_EXISTS = False
if SPARK_EXISTS:
print("Spark exists, running spark tests")
yield spark
else:
spark = None
logger.error("Spark not available")
print("Spark not available")
yield spark
| 31.070423 | 86 | 0.584769 |
5aa67b18726aa3b5bd632607cec1f8dafea0e8d8 | 507 | py | Python | cache_publications.py | igormcoelho/igormcoelho.github.io | 2fe1e3d54f9ffec04fa6cd2f90c7860400a8ecf8 | [
"MIT"
] | 2 | 2020-07-12T01:21:20.000Z | 2021-06-19T04:05:59.000Z | cache_publications.py | igormcoelho/igormcoelho.github.io | 2fe1e3d54f9ffec04fa6cd2f90c7860400a8ecf8 | [
"MIT"
] | 3 | 2021-09-28T04:45:57.000Z | 2022-02-26T09:16:56.000Z | cache_publications.py | igormcoelho/igormcoelho.github.io | 2fe1e3d54f9ffec04fa6cd2f90c7860400a8ecf8 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
papers = BeautifulSoup(open("_site/generate_selected_papers.html").read())
pub_html = papers.body.article.find(id="pub_selected_papers").prettify()
f = open('_includes/selected_papers_cache.html', 'w')
f.write(pub_html)
f.close()
from bs4 import BeautifulSoup
papers = BeautifulSoup(open("_site/generate_all_papers.html").read())
pub_html = papers.body.article.find(id="pub_all_papers").prettify()
f = open('_includes/all_papers_cache.html', 'w')
f.write(pub_html)
f.close()
| 33.8 | 74 | 0.775148 |
b3b2c7cd86734b447130a542cb1d091ebbe0b51a | 383 | py | Python | code/backend/src/database/models/__init__.py | tobiasclaas/semantic-data-lake | 16fc25a74918c9ac4d95f14bf3c15af053cee53e | [
"MIT"
] | 1 | 2022-02-23T14:32:38.000Z | 2022-02-23T14:32:38.000Z | code/backend/src/database/models/__init__.py | tobiasclaas/semantic-data-lake | 16fc25a74918c9ac4d95f14bf3c15af053cee53e | [
"MIT"
] | null | null | null | code/backend/src/database/models/__init__.py | tobiasclaas/semantic-data-lake | 16fc25a74918c9ac4d95f14bf3c15af053cee53e | [
"MIT"
] | null | null | null | from .storage import (
BaseStorage, MongodbStorage, PostgresqlStorage, CsvStorage,
JsonStorage, XmlStorage
)
from .user import User
from .workspace import Workspace
from .ontology import Ontology
from .dataset import Dataset
from .metadata import Metadata
from .datamart import DatamartStatus, Datamart, DatamartState
from .annotation import Annotation
from .job import Job
| 27.357143 | 63 | 0.81201 |
7b69ab522eb623e8117709c6ffb1efd8776ba8af | 813 | py | Python | netsuite/api/predicates.py | fernando-almeida/python-netsuite | da123b23d1af91767c9d10c2a1f60efabd855668 | [
"BSD-3-Clause"
] | 7 | 2018-02-09T19:22:56.000Z | 2021-01-24T04:05:02.000Z | netsuite/api/predicates.py | fernando-almeida/python-netsuite | da123b23d1af91767c9d10c2a1f60efabd855668 | [
"BSD-3-Clause"
] | null | null | null | netsuite/api/predicates.py | fernando-almeida/python-netsuite | da123b23d1af91767c9d10c2a1f60efabd855668 | [
"BSD-3-Clause"
] | 6 | 2020-07-28T12:43:10.000Z | 2021-06-10T00:17:36.000Z | """Predicates."""
class RecordTypeSelectorPredicate(object):
"""Check if a given record type should be selected."""
    def __init__(self, record_types=None):
        """Constructor.
        Args:
            record_types: List of record types to select.
        """
        if record_types is not None and not isinstance(record_types, list):
            raise Exception("Record types must be a list")
        self.record_types = record_types or []
def __call__(self, record_type):
"""Check if a given record type should be batched.
Args:
record_type: Type of record to check for batching
Returns:
True if the record type should be selected or False otherwise
"""
if not self.record_types:
return False
return record_type in self.record_types
| 26.225806 | 77 | 0.602706 |
c755540ef9420e4b6811c3cf3842a0bd8a907215 | 3,800 | py | Python | hsfs/numerov.py | aa-morgan/helium-stark-FS | 7617c0761398dc60b69bb01c533cfa405c2a3d82 | [
"BSD-3-Clause"
] | null | null | null | hsfs/numerov.py | aa-morgan/helium-stark-FS | 7617c0761398dc60b69bb01c533cfa405c2a3d82 | [
"BSD-3-Clause"
] | null | null | null | hsfs/numerov.py | aa-morgan/helium-stark-FS | 7617c0761398dc60b69bb01c533cfa405c2a3d82 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 13:08:34 2017
@author: Adam
"""
from math import ceil, log, exp
import numpy as np
from numba import jit
@jit
def wf(n, l, nmax, step=0.005, rmin=0.65):
""" Use the Numerov method to find the wavefunction for state n*, l, where
n* = n - delta.
nmax ensures that wavefunctions from different values of n can be aligned.
"""
W1 = -0.5 * n**-2.0
W2 = (l + 0.5)**2.0
rmax = 2 * nmax * (nmax + 15)
r_in = n**2.0 - n * (n**2.0 - l*(l + 1.0))**0.5
step_sq = step**2.0
# ensure wf arrays will align using nmax
if n == nmax:
i = 0
r_sub2 = rmax
else:
i = int(ceil(log(rmax / (2 * n * (n + 15))) / step))
r_sub2 = rmax * exp(-i*step)
i += 1
# initialise
r_sub1 = rmax * exp(-i*step)
rvals = [r_sub2, r_sub1]
g_sub2 = 2.0 * r_sub2**2.0 * (-1.0 / r_sub2 - W1) + W2
g_sub1 = 2.0 * r_sub1**2.0 * (-1.0 / r_sub1 - W1) + W2
y_sub2 = 1e-10
y_sub1 = y_sub2 * (1.0 + step * g_sub2**0.5)
yvals = [y_sub2, y_sub1]
# Numerov method
i += 1
r = r_sub1
while r >= rmin:
## next step
r = rmax * exp(-i*step)
g = 2.0 * r**2.0 * (-1.0 / r - W1) + W2
y = (y_sub2 * (g_sub2 - (12.0 / step_sq)) + y_sub1 * \
(10.0 * g_sub1 + (24.0 / step_sq))) / ((12.0 / step_sq) - g)
## check for divergence
if r < r_in:
dy = abs((y - y_sub1) / y_sub1)
dr = (r**(-l-1) - r_sub1**(-l-1)) / r_sub1**(-l-1)
if dy > dr:
break
## store vals
rvals.append(r)
yvals.append(y)
## next iteration
r_sub1 = r
g_sub2 = g_sub1
g_sub1 = g
y_sub2 = y_sub1
y_sub1 = y
i += 1
rvals = np.array(rvals)
yvals = np.array(yvals)
# normalisation
yvals = yvals * (np.sum((yvals**2.0) * (rvals**2.0)))**-0.5
return rvals, yvals
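# Illustrative usage sketch (not part of the original module; the quantum
# numbers below are arbitrary choices): evaluate a radial wavefunction and
# confirm the normalisation applied at the end of wf().
def _example_wf_norm(n=10, l=2):
    r, y = wf(n, l, nmax=n)
    return float(np.sum((y ** 2.0) * (r ** 2.0)))  # expected to be ~1.0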
@jit
def find_first(arr, val):
""" Index of the first occurence of val in arr.
"""
i = 0
while i < len(arr):
if val == arr[i]:
return i
i += 1
raise Exception('val not found in arr')
@jit
def find_last(arr, val):
""" Index of the last occurence of val in arr.
"""
i = len(arr) - 1
while i > 0:
if val == arr[i]:
return i
i -= 1
raise Exception('val not found in arr')
@jit
def wf_align(r1, y1, r2, y2):
""" Align two lists pairs (r, y) on r, assuming r array values overlap
except at head and tail, and that arrays are reverse sorted.
"""
if r1[0] != r2[0]:
# trim front end
if r1[0] > r2[0]:
idx = find_first(r1, r2[0])
r1 = r1[idx:]
y1 = y1[idx:]
else:
idx = find_first(r2, r1[0])
r2 = r2[idx:]
y2 = y2[idx:]
if r1[-1] != r2[-1]:
# trim back end
if r1[-1] < r2[-1]:
idx = find_last(r1, r2[-1])
r1 = r1[:idx + 1]
y1 = y1[:idx + 1]
else:
idx = find_last(r2, r1[-1])
r2 = r2[:idx + 1]
y2 = y2[:idx + 1]
if r1[0] == r2[0] and r1[-1] == r2[-1] and len(r1) == len(r2):
return r1, y1, r2, y2
else:
raise Exception("Failed to align wavefunctions.")
@jit
def wf_overlap(r1, y1, r2, y2, p=1.0):
""" Find the overlap between two radial wavefunctions (r, y).
"""
r1, y1, r2, y2 = wf_align(r1, y1, r2, y2)
return np.sum(y1 * y2 * r1**(2.0 + p))
@jit(cache=True)
def radial_overlap(n1, l1, n2, l2, p=1.0):
""" Radial overlap for state n1, l1 and n2 l2.
"""
nmax = max(n1, n2)
r1, y1 = wf(n1, l1, nmax)
r2, y2 = wf(n2, l2, nmax)
return wf_overlap(r1, y1, r2, y2, p) | 26.760563 | 82 | 0.485263 |
6a01980f27ee8bec8c897fe399fd4ada64d6ff03 | 668 | py | Python | Config.py | izzortsi/crypto-trading-ai-bot-basic | 38cb5bda778447d55b7854b5d2035e3da0f629b2 | [
"MIT"
] | 195 | 2021-01-08T12:46:28.000Z | 2022-03-27T19:29:08.000Z | Config.py | izzortsi/crypto-trading-ai-bot-basic | 38cb5bda778447d55b7854b5d2035e3da0f629b2 | [
"MIT"
] | 8 | 2021-02-04T05:19:40.000Z | 2021-10-10T20:32:55.000Z | Config.py | izzortsi/crypto-trading-ai-bot-basic | 38cb5bda778447d55b7854b5d2035e3da0f629b2 | [
"MIT"
] | 93 | 2021-01-08T12:46:30.000Z | 2022-03-13T20:59:12.000Z | DATASET_DIR = "datasets/"
COIN_PAIR = "BTC-USD"
GRANULARITY = 60 # Data every 1 minute
TRAINING_MONTHS = ["2018_06","2018_07","2018_08","2018_09","2018_10","2018_11","2018_12","2019_01",
"2019_02","2019_03","2019_04","2019_05","2019_06","2019_07","2019_08","2019_09",
"2019_10","2019_11","2019_12","2020_01","2020_02","2020_03","2020_04","2020_05",
"2020_06","2020_07","2020_08","2020_09"]
TESTING_MONTHS = ["2020_10"]
# Model and Auto Trader
CHANGE_RATE_THRESHOLD = 0.005
TRAINING_WINDOW = 360 # Window to use for training in minutes
LABELING_WINDOW = 360 # How far ahead to look for labeling / prediction
| 41.75 | 99 | 0.663174 |
23e886badebb57d1b989b9e55b0eb786efc54585 | 4,497 | py | Python | utils.py | xavysp/pidinet_xsp | fbf23bdf2dde0f6204c61261eee32a05cd3b499d | [
"MIT"
] | null | null | null | utils.py | xavysp/pidinet_xsp | fbf23bdf2dde0f6204c61261eee32a05cd3b499d | [
"MIT"
] | null | null | null | utils.py | xavysp/pidinet_xsp | fbf23bdf2dde0f6204c61261eee32a05cd3b499d | [
"MIT"
] | null | null | null | """
Utility functions for training
Author: Zhuo Su, Wenzhe Liu
Date: Aug 22, 2020
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import os
import shutil
import math
import time
import random
from PIL import Image
import numpy as np
from skimage import io
from skimage.transform import resize
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
######################################
# measurement functions #
######################################
def get_model_parm_nums(model):
total = sum([param.numel() for param in model.parameters()])
total = float(total) / 1e6
return total
######################################
# basic functions #
######################################
def load_checkpoint(args, running_file=None):
model_dir = os.path.join(args.savedir, 'save_models')
latest_filename = os.path.join(model_dir, 'latest.txt')
model_filename = ''
if args.evaluate is not None:
model_filename = args.evaluate
else:
if os.path.exists(latest_filename):
with open(latest_filename, 'r') as fin:
model_filename = fin.readlines()[0].strip()
loadinfo = "=> loading checkpoint from '{}'".format(model_filename)
print(loadinfo)
state = None
if os.path.exists(model_filename):
state = torch.load(model_filename, map_location='cpu')
loadinfo2 = "=> loaded checkpoint '{}' successfully".format(model_filename)
else:
loadinfo2 = "no checkpoint loaded"
print(loadinfo2)
running_file.write('%s\n%s\n' % (loadinfo, loadinfo2))
running_file.flush()
return state
def save_checkpoint(state, epoch, root, saveID, keep_freq=10, train_data='NONE'):
filename = 'checkpoint_%03d.pth.tar' % epoch
model_dir = os.path.join(root, 'save_models')
model_filename = os.path.join(model_dir, filename)
latest_filename = os.path.join(model_dir, 'latest.txt')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# write new checkpoint
torch.save(state, model_filename)
with open(latest_filename, 'w') as fout:
fout.write(model_filename)
print("=> saved checkpoint '{}'".format(model_filename))
# remove old model
if saveID is not None and (saveID + 1) % keep_freq != 0:
filename = 'checkpoint_%03d.pth.tar' % saveID
model_filename = os.path.join(model_dir, filename)
if os.path.exists(model_filename):
os.remove(model_filename)
print('=> removed checkpoint %s' % model_filename)
print('##########Time##########', time.strftime('%Y-%m-%d %H:%M:%S'))
return epoch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
#self.sum += val * n
self.sum += val
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args):
method = args.lr_type
if method == 'cosine':
T_total = float(args.epochs)
T_cur = float(epoch)
lr = 0.5 * args.lr * (1 + math.cos(math.pi * T_cur / T_total))
elif method == 'multistep':
lr = args.lr
for epoch_step in args.lr_steps:
if epoch >= epoch_step:
lr = lr * 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = lr
str_lr = '%.6f' % lr
return str_lr
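# Minimal sketch of the cosine schedule above; the argparse namespace and the
# dummy optimizer below are assumptions, not part of the original training code.
def _example_cosine_schedule():
    from types import SimpleNamespace
    args = SimpleNamespace(lr_type='cosine', lr=0.01, epochs=20, lr_steps=[])
    optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=args.lr)
    # lr decays from 0.010000 at epoch 0 to ~0 at epoch 20, passing 0.005000 at epoch 10
    return [adjust_learning_rate(optimizer, epoch, args) for epoch in (0, 10, 20)]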
######################################
# edge specific functions #
######################################
def cross_entropy_loss_RCF(prediction, labelf, beta):
label = labelf.long()
mask = labelf.clone()
num_positive = torch.sum(label==1).float()
num_negative = torch.sum(label==0).float()
mask[label == 1] = 1.0 * num_negative / (num_positive + num_negative)
mask[label == 0] = beta * num_positive / (num_positive + num_negative)
mask[label == 2] = 0
cost = F.binary_cross_entropy(
prediction, labelf, weight=mask, reduction='sum')
return cost
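# Worked illustration of the class-balancing above (assumed counts, not taken
# from this file): with 10 positive and 90 negative pixels, positives receive
# weight 90/100 = 0.9, negatives receive beta * 10/100, and pixels labelled 2
# are excluded from the loss entirely.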
######################################
# debug functions #
######################################
# no function currently
| 28.10625 | 83 | 0.594174 |
62a5c755fccbd76f16e2ae824000cf05bd5c75fe | 1,857 | py | Python | jelm/tests/test_io.py | endremborza/jelm | 6916bbd4ceb909ad3350c56d3a149bdb97671489 | [
"MIT"
] | null | null | null | jelm/tests/test_io.py | endremborza/jelm | 6916bbd4ceb909ad3350c56d3a149bdb97671489 | [
"MIT"
] | null | null | null | jelm/tests/test_io.py | endremborza/jelm | 6916bbd4ceb909ad3350c56d3a149bdb97671489 | [
"MIT"
] | null | null | null | import json
import os
import pytest
from jelm.core.jelm_class import Jelm
from jelm.core.io.input import reads_json, read_json
from jelm.tests.network_case_set_class import NetwokCaseTemplate
def test_json_reads():
test_dic = {"metadata": {"author": "me"}, "objects": []}
dump = json.dumps(test_dic)
el = reads_json(dump)
assert isinstance(el, Jelm)
def test_json_reads_dumps_w_cases(jelm_pair_case: NetwokCaseTemplate):
def io_fun(_el: Jelm):
_dump = _el.json_dumps()
return reads_json(_dump)
jelm_pair_case.evaluate_fun(non_altering_function=io_fun)
def test_json_read(tmp_path):
d = tmp_path / "sub"
d.mkdir()
p = d / "fing.jelm"
test_dic = {"metadata": {"author": "me"}, "objects": []}
dump = json.dumps(test_dic)
p.write_text(dump)
fp = os.fspath(p)
el = read_json(fp)
assert isinstance(el, Jelm)
def test_json_dump(tmp_path):
d = tmp_path / "sub2"
d.mkdir()
p = d / "fing1.jelm"
p2 = d / "fing2.jelm"
test_dic = {"metadata": {"author": "me"}, "objects": [{"type": "node", "id": "n1"}]}
el = Jelm(**test_dic)
assert isinstance(el.dict(), dict)
assert el.dict() == test_dic
assert isinstance(el.json_dumps(), str)
fp = os.fspath(p)
fp2 = os.fspath(p2)
el.json_dump(fp)
el.json_dump(open(fp2, "w"))
el2 = read_json(fp)
el3 = read_json(fp2)
assert el.dict() == el2.dict()
assert el.dict() == el3.dict()
with pytest.raises(TypeError):
el.json_dump(10)
def test_json_read_dump_w_cases(tmp_path, jelm_pair_case: NetwokCaseTemplate):
d = tmp_path / "sub3"
d.mkdir()
p = d / "fing3.jelm"
fp = os.fspath(p)
def io_fun(_el: Jelm):
_el.json_dump(fp)
return read_json(fp)
jelm_pair_case.evaluate_fun(non_altering_function=io_fun)
| 19.755319 | 88 | 0.637588 |
8f51d7b60fbb6245bc75f1362989e315d9828b4d | 341 | py | Python | src/lesson_developer_tools/traceback_format_exception.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | 3 | 2018-08-14T09:33:52.000Z | 2022-03-21T12:31:58.000Z | src/lesson_developer_tools/traceback_format_exception.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | null | null | null | src/lesson_developer_tools/traceback_format_exception.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | [
"Apache-2.0"
] | null | null | null | import traceback
import sys
from pprint import pprint
from traceback_example import produce_exception
try:
produce_exception()
except Exception as err:
print('format_exception():')
exc_type, exc_value, exc_tb = sys.exc_info()
pprint(
traceback.format_exception(exc_type, exc_value, exc_tb),
width=65,
)
| 21.3125 | 64 | 0.721408 |
2327c73aa4f89a42a26301da272b83b597188b73 | 27 | py | Python | monitor/monitor_engine/tests.py | josamuna/codepo-backend | 3666527aebcc3bd22b45c359d1bc68abc85a9a47 | [
"MIT"
] | 7 | 2020-12-28T23:03:11.000Z | 2020-12-30T09:29:47.000Z | monitor/monitor_engine/tests.py | josamuna/codepo-backend | 3666527aebcc3bd22b45c359d1bc68abc85a9a47 | [
"MIT"
] | 23 | 2020-02-09T12:24:00.000Z | 2021-06-04T22:52:45.000Z | monitor/monitor_engine/tests.py | josamuna/codepo-backend | 3666527aebcc3bd22b45c359d1bc68abc85a9a47 | [
"MIT"
] | 2 | 2020-12-28T23:15:28.000Z | 2020-12-29T07:56:37.000Z | # Create your tests here.
| 13.5 | 26 | 0.703704 |
3b234210e8bd2bb5ae4f029569f7a8149654512c | 940 | py | Python | front-end-service/audio_consumer.py | Rukundo725/speech-to-text-data-collection | 61c3d4858febc81980c854b5112bb629191e45af | [
"MIT",
"Unlicense"
] | 3 | 2021-12-03T09:06:31.000Z | 2022-03-31T06:13:47.000Z | front-end-service/audio_consumer.py | Rukundo725/speech-to-text-data-collection | 61c3d4858febc81980c854b5112bb629191e45af | [
"MIT",
"Unlicense"
] | 16 | 2021-09-06T20:19:25.000Z | 2021-09-09T20:29:42.000Z | front-end-service/audio_consumer.py | Rukundo725/speech-to-text-data-collection | 61c3d4858febc81980c854b5112bb629191e45af | [
"MIT",
"Unlicense"
] | 4 | 2021-09-06T19:46:45.000Z | 2021-09-08T19:05:31.000Z | from kafka import KafkaConsumer, KafkaProducer
import os
import json
import uuid
from concurrent.futures import ThreadPoolExecutor
from scipy.io.wavfile import read, write
import pydub
import io
import boto3
s3 = boto3.resource('s3')
if __name__ == "__main__":
TOPIC_NAME = "audio"
consumer = KafkaConsumer(TOPIC_NAME, client_id='d_id', bootstrap_servers=["b-1.demo-cluster-1.9q7lp7.c1.kafka.eu-west-1.amazonaws.com:9092",
"b-2.demo-cluster-1.9q7lp7.c1.kafka.eu-west-1.amazonaws.com:9092"],
auto_offset_reset='earliest',
enable_auto_commit=True)
for event in consumer:
event_data = event.value
print(event_data)
bytes_wav = bytes()
byte_io = io.BytesIO(event_data)
print ("done")
audio = pydub.AudioSegment.from_raw(byte_io, sample_width=2, frame_rate=22050, channels=1).export("newfile", format='wav')
s3.meta.client.upload_file("newfile","chang-stt-bucket","newfile.wav")
| 32.413793 | 142 | 0.730851 |
b913385f26700b04dd5f9a4018aa9a7732898bac | 1,082 | py | Python | libdata/chexpert_index/split_trainset.py | google-research/understanding-transfer-learning | 0e4df444f342784514d91028d0de332103343a94 | [
"Apache-2.0"
] | 35 | 2020-11-10T18:50:20.000Z | 2022-01-06T05:37:56.000Z | libdata/chexpert_index/split_trainset.py | MLC-CV/transfer-learning-understanding | 0e4df444f342784514d91028d0de332103343a94 | [
"Apache-2.0"
] | null | null | null | libdata/chexpert_index/split_trainset.py | MLC-CV/transfer-learning-understanding | 0e4df444f342784514d91028d0de332103343a94 | [
"Apache-2.0"
] | 10 | 2020-11-17T01:22:07.000Z | 2022-03-10T21:48:35.000Z | #!/usr/bin/python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
with open('train.csv', 'r') as in_f:
lines = in_f.readlines()
header = lines[0]
lines = lines[1:]
rng = np.random.RandomState(seed=1234)
lines = rng.permutation(lines)
def export(fn, start_idx, stop_idx):
with open(fn, 'w') as out_f:
out_f.write(header)
for line in lines[start_idx:stop_idx]:
out_f.write(line)
# 50k trainingset
export('train.0-50k.csv', 0, 50000)
# 50k for testset
export('train.50k-100k.csv', 50000, 100000)
| 28.473684 | 74 | 0.715342 |
83d9b052365e6fa8d2da387d7762b65219856260 | 236 | py | Python | project/apps/hiking/views.py | skoczen/skoczen | 07d22029e7c9e21bc3322e26072471d01c059cc4 | [
"BSD-2-Clause"
] | null | null | null | project/apps/hiking/views.py | skoczen/skoczen | 07d22029e7c9e21bc3322e26072471d01c059cc4 | [
"BSD-2-Clause"
] | null | null | null | project/apps/hiking/views.py | skoczen/skoczen | 07d22029e7c9e21bc3322e26072471d01c059cc4 | [
"BSD-2-Clause"
] | null | null | null | from annoying.decorators import render_to
from hiking.models import Hike
@render_to("hiking/home.html")
def home(request):
hike = Hike.objects.order_by("-date")
if hike.count() > 0:
hike = hike[0]
return locals()
| 19.666667 | 41 | 0.677966 |
8d14dcb75889fff2ebe1fbe06c22d283584c81e7 | 2,581 | py | Python | saleor/core/templatetags/status.py | ProgrammingJoe/saleor | f89dbafe0b31c226e267b7a516a09dd5168e1911 | [
"BSD-3-Clause"
] | 2 | 2018-12-24T01:00:13.000Z | 2019-01-21T15:09:16.000Z | saleor/core/templatetags/status.py | ProgrammingJoe/saleor | f89dbafe0b31c226e267b7a516a09dd5168e1911 | [
"BSD-3-Clause"
] | 64 | 2019-02-11T17:02:05.000Z | 2021-06-25T15:16:57.000Z | saleor/core/templatetags/status.py | ProgrammingJoe/saleor | f89dbafe0b31c226e267b7a516a09dd5168e1911 | [
"BSD-3-Clause"
] | 2 | 2019-01-08T02:32:42.000Z | 2021-07-05T14:05:55.000Z | from django.template import Library
from payments import PaymentStatus
from ...order import OrderStatus
from ...product import ProductAvailabilityStatus, VariantAvailabilityStatus
from ...product.utils.availability import (
get_product_availability_status, get_variant_availability_status)
register = Library()
ERRORS = {PaymentStatus.ERROR, PaymentStatus.REJECTED}
SUCCESSES = {PaymentStatus.CONFIRMED, PaymentStatus.REFUNDED}
LABEL_DANGER = 'danger'
LABEL_SUCCESS = 'success'
LABEL_DEFAULT = 'default'
@register.inclusion_tag('status_label.html')
def render_status(status, status_display=None):
if status in ERRORS:
label_cls = LABEL_DANGER
elif status in SUCCESSES:
label_cls = LABEL_SUCCESS
else:
label_cls = LABEL_DEFAULT
return {'label_cls': label_cls, 'status': status_display or status}
@register.inclusion_tag('status_label.html')
def render_order_status(status, status_display=None):
if status == OrderStatus.FULFILLED:
label_cls = LABEL_SUCCESS
else:
label_cls = LABEL_DEFAULT
return {'label_cls': label_cls, 'status': status_display or status}
@register.inclusion_tag('status_label.html')
def render_availability_status(product):
status = get_product_availability_status(product)
display = ProductAvailabilityStatus.get_display(status)
if status == ProductAvailabilityStatus.READY_FOR_PURCHASE:
label_cls = LABEL_SUCCESS
else:
label_cls = LABEL_DANGER
return {'status': display, 'label_cls': label_cls}
@register.inclusion_tag('status_label.html')
def render_variant_availability_status(variant):
status = get_variant_availability_status(variant)
display = VariantAvailabilityStatus.get_display(status)
if status == VariantAvailabilityStatus.AVAILABLE:
label_cls = LABEL_SUCCESS
else:
label_cls = LABEL_DANGER
return {'status': display, 'label_cls': label_cls}
@register.inclusion_tag('dashboard/includes/_page_availability.html')
def render_page_availability(page):
ctx = {'is_published': page.is_published, 'page': page}
if page.is_published:
label_cls = LABEL_SUCCESS
ctx.update({'label_cls': label_cls})
return ctx
@register.inclusion_tag('dashboard/includes/_collection_availability.html')
def render_collection_availability(collection):
if collection.is_published:
label_cls = LABEL_SUCCESS
else:
label_cls = LABEL_DANGER
return {'is_published': collection.is_published,
'collection': collection,
'label_cls': label_cls}
| 31.864198 | 75 | 0.748547 |
c68dafee2ba404b5fd38870662c409e1910966b3 | 16,912 | py | Python | molpal/models/nnmodels.py | jenna-fromer/molpal | a6320610d0c9ecd708f4a709110f272867eaa6cf | [
"MIT"
] | null | null | null | molpal/models/nnmodels.py | jenna-fromer/molpal | a6320610d0c9ecd708f4a709110f272867eaa6cf | [
"MIT"
] | null | null | null | molpal/models/nnmodels.py | jenna-fromer/molpal | a6320610d0c9ecd708f4a709110f272867eaa6cf | [
"MIT"
] | null | null | null | """This module contains Model implementations that utilize an NN model as their
underlying model"""
from functools import partial
import json
from pathlib import Path
from typing import Callable, Iterable, List, NoReturn, Optional, Sequence, Tuple, TypeVar
import numpy as np
from numpy import ndarray
from tqdm import tqdm
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow import keras
from molpal.featurizer import Featurizer, feature_matrix
from molpal.models.base import Model
T = TypeVar("T")
T_feat = TypeVar("T_feat")
Dataset = tf.data.Dataset
def mve_loss(y_true, y_pred):
mu = y_pred[:, 0]
var = tf.math.softplus(y_pred[:, 1])
return tf.reduce_mean(
tf.math.log(2 * 3.141592) / 2
+ tf.math.log(var) / 2
+ tf.math.square(mu - y_true) / (2 * var)
)
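# Minimal sanity check of the negative-log-likelihood above (toy tensors and
# values are assumptions, not project data): a predicted mean that matches the
# target scores lower than one that is far off at the same predicted variance.
def _example_mve_loss():
    y_true = tf.constant([1.0, 2.0])
    close = tf.constant([[1.0, 0.0], [2.0, 0.0]])  # mu == y_true, var = softplus(0)
    far = tf.constant([[5.0, 0.0], [-2.0, 0.0]])   # mu far from y_true
    return mve_loss(y_true, close), mve_loss(y_true, far)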
class NN:
"""A feed-forward neural network model
Attributes
----------
    model : keras.Model
the underlying model on which to train and perform inference with
optimizer : keras.optimizers.Adam
the model optimizer
loss : Callable
the loss function to use
input_size : int
the dimension of the model input
output_size : int
the dimension of the model output
batch_size : int
the size to batch training into
    uncertainty : Optional[str]
        the uncertainty method this model uses, if any; when set, the model
        directly predicts its own uncertainty
mean : float
the mean of the unnormalized data
std : float
the standard deviation of the unnormalized data
Parameters
----------
input_size : int
num_tasks : int
batch_size : int, default=4096
layer_sizes : Optional[Sequence[int]], default=None
the sizes of the hidden layers in the network. If None, default to
two hidden layers with 100 neurons each.
dropout : Optional[float], default=None
if specified, add a dropout hidden layer with the specified dropout
rate after each hidden layer
activation : Optional[str], default='relu'
the name of the activation function to use
uncertainty : Optional[str], default=None
"""
def __init__(
self,
input_size: int,
num_tasks: int,
batch_size: int = 4096,
layer_sizes: Optional[Sequence[int]] = None,
dropout: Optional[float] = None,
activation: Optional[str] = "relu",
uncertainty: Optional[str] = None,
model_seed: Optional[int] = None,
):
self.input_size = input_size
self.batch_size = batch_size
self.uncertainty = uncertainty
layer_sizes = layer_sizes or [100, 100]
self.model, self.optimizer, self.loss = self.build(
input_size, num_tasks, layer_sizes, dropout, self.uncertainty, activation
)
self.mean = 0
self.std = 0
tf.random.set_seed(model_seed)
def build(self, input_size, num_tasks, layer_sizes, dropout, uncertainty, activation):
"""Build the model, optimizer, and loss function"""
dropout_at_predict = uncertainty == "dropout"
output_size = 2 * num_tasks if self.uncertainty else num_tasks
inputs = keras.layers.Input(shape=(input_size,))
hidden = inputs
for layer_size in layer_sizes:
hidden = keras.layers.Dense(
units=layer_size,
activation=activation,
kernel_regularizer=keras.regularizers.l2(0.01),
)(hidden)
if dropout:
hidden = keras.layers.Dropout(dropout)(hidden, training=dropout_at_predict)
outputs = keras.layers.Dense(output_size, activation="linear")(hidden)
model = keras.Model(inputs, outputs)
if uncertainty not in {"mve"}:
optimizer = keras.optimizers.Adam(lr=0.01)
loss = keras.losses.mse
elif uncertainty == "mve":
optimizer = keras.optimizers.Adam(lr=0.05)
loss = mve_loss
else:
raise ValueError(f'Unrecognized uncertainty method: "{uncertainty}"')
return model, optimizer, loss
def train(
self, xs: Iterable[T], ys: Iterable[float], featurizer: Callable[[T], ndarray]
) -> bool:
"""Train the model on xs and ys with the given featurizer
Parameters
----------
xs : Sequence[T]
an sequence of inputs in their identifier representations
ys : Sequence[float]
a parallel sequence of target values for these inputs
featurize : Callable[[T], ndarray]
a function that transforms an identifier into its uncompressed
feature representation
Returns
-------
True
"""
self.model.compile(optimizer=self.optimizer, loss=self.loss)
X = np.array(feature_matrix(xs, featurizer))
Y = self._normalize(ys)
self.model.fit(
X,
Y,
batch_size=self.batch_size,
validation_split=0.2,
epochs=50,
validation_freq=2,
verbose=0,
callbacks=[
keras.callbacks.EarlyStopping(
monitor="val_loss", patience=5, restore_best_weights=True, verbose=0
),
tfa.callbacks.TQDMProgressBar(leave_epoch_progress=False),
],
)
return True
def predict(self, xs: Sequence[ndarray]) -> ndarray:
X = np.stack(xs, axis=0)
Y_pred = self.model.predict(X)
if self.uncertainty == "mve":
Y_pred[:, 0::2] = Y_pred[:, 0::2] * self.std + self.mean
Y_pred[:, 1::2] = Y_pred[:, 1::2] * self.std ** 2
else:
Y_pred = Y_pred * self.std + self.mean
return Y_pred
def save(self, path) -> str:
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
model_path = f"{path}/model"
self.model.save(model_path, include_optimizer=True)
state_path = f"{path}/state.json"
state = {"std": self.std, "mean": self.mean, "model_path": model_path}
json.dump(state, open(state_path, "w"), indent=4)
return state_path
def load(self, path):
state = json.load(open(path, "r"))
model_path = state["model_path"]
self.std = state["std"]
self.mean = state["mean"]
if self.uncertainty == "mve":
custom_objects = {"mve_loss": mve_loss}
else:
custom_objects = {}
self.model = keras.models.load_model(model_path, custom_objects=custom_objects)
def _normalize(self, ys: Sequence[float]) -> ndarray:
Y = np.stack(list(ys))
self.mean = np.nanmean(Y, axis=0)
self.std = np.nanstd(Y, axis=0)
return (Y - self.mean) / self.std
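# Illustrative construction of the wrapper above (a sketch; the 2048-bit input
# size is an assumed fingerprint length, not a value taken from this module):
def _example_build_mve_nn():
    return NN(input_size=2048, num_tasks=1, layer_sizes=[100, 100],
              dropout=0.1, uncertainty="mve")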
class NNModel(Model):
"""A simple feed-forward neural network model
Attributes
----------
model : NN
the underlying neural net on which to train and perform inference
Parameters
----------
input_size : int
the size of the input dimension of the NN
    test_batch_size : Optional[int] (Default = 4096)
the size into which inputs should be batched
during training and inference
dropout : Optional[float] (Default = 0.0)
the dropout probability during training
See also
--------
NNDropoutModel
NNEnsembleModel
NNTwoOutputModel
"""
def __init__(
self,
input_size: int,
test_batch_size: Optional[int] = 4096,
dropout: Optional[float] = 0.0,
model_seed: Optional[int] = None,
**kwargs,
):
test_batch_size = test_batch_size or 4096
self.build_model = partial(
NN,
input_size=input_size,
num_tasks=1,
batch_size=test_batch_size,
dropout=dropout,
model_seed=model_seed,
)
self.model = self.build_model()
super().__init__(test_batch_size, **kwargs)
@property
def provides(self):
return {"means"}
@property
def type_(self):
return "nn"
def train(
self,
xs: Iterable[T],
ys: Sequence[Optional[float]],
*,
featurizer: Featurizer,
retrain: bool = False,
) -> bool:
if retrain:
self.model = self.build_model()
return self.model.train(xs, ys, featurizer)
def get_means(self, xs: List) -> ndarray:
return self.model.predict(xs)[:, 0]
def get_means_and_vars(self, xs: List) -> NoReturn:
raise TypeError("NNModel can't predict variances!")
def save(self, path) -> str:
return self.model.save(path)
def load(self, path):
self.model.load(path)
class NNEnsembleModel(Model):
"""A feed-forward neural network ensemble model for estimating mean
and variance.
Attributes
----------
models : List[NN]
the underlying neural nets on which to train and perform inference
Parameters
----------
input_size : int
the size of the input dimension of the NN
    test_batch_size : Optional[int] (Default = 4096)
the size into which inputs should be batched
during training and inference
dropout : Optional[float] (Default = 0.0)
the dropout probability during training
ensemble_size : int (Default = 5)
the number of separate models to train
bootstrap_ensemble : bool
NOTE: UNUSED
"""
def __init__(
self,
input_size: int,
test_batch_size: Optional[int] = 4096,
dropout: Optional[float] = 0.0,
ensemble_size: int = 5,
bootstrap_ensemble: Optional[bool] = False,
model_seed: Optional[int] = None,
**kwargs,
):
test_batch_size = test_batch_size or 4096
self.build_model = partial(
NN,
input_size=input_size,
num_tasks=1,
batch_size=test_batch_size,
dropout=dropout,
model_seed=model_seed,
)
self.ensemble_size = ensemble_size
self.models = [self.build_model() for _ in range(self.ensemble_size)]
self.bootstrap_ensemble = bootstrap_ensemble # TODO: Actually use this
super().__init__(test_batch_size=test_batch_size, **kwargs)
@property
def type_(self):
return "nn"
@property
def provides(self):
return {"means", "vars"}
def train(
self,
xs: Iterable[T],
ys: Sequence[Optional[float]],
*,
featurizer: Featurizer,
retrain: bool = False,
):
if retrain:
self.models = [self.build_model() for _ in range(self.ensemble_size)]
return all([model.train(xs, ys, featurizer) for model in self.models])
def get_means(self, xs: Sequence) -> np.ndarray:
preds = np.zeros((len(xs), len(self.models)))
for j, model in tqdm(
enumerate(self.models), leave=False, desc="ensemble prediction", unit="model"
):
preds[:, j] = model.predict(xs)[:, 0]
return np.mean(preds, axis=1)
def get_means_and_vars(self, xs: Sequence) -> Tuple[np.ndarray, np.ndarray]:
preds = np.zeros((len(xs), len(self.models)))
for j, model in tqdm(
enumerate(self.models), leave=False, desc="ensemble prediction", unit="model"
):
preds[:, j] = model.predict(xs)[:, 0]
return np.mean(preds, axis=1), np.var(preds, axis=1)
def save(self, path) -> str:
for i, model in enumerate(self.models):
model.save(path, f"model_{i}")
return path
def load(self, path):
for model, model_path in zip(self.models, path.iterdir()):
model.load(model_path)
class NNTwoOutputModel(Model):
"""Feed forward neural network with two outputs so it learns to predict
its own uncertainty at the same time
Attributes
----------
model : NN
the underlying neural net on which to train and perform inference
Parameters
----------
input_size : int
the size of the input dimension of the NN
    test_batch_size : Optional[int] (Default = 4096)
the size into which inputs should be batched
during training and inference
dropout : Optional[float] (Default = 0.0)
the dropout probability during training
"""
def __init__(
self,
input_size: int,
test_batch_size: Optional[int] = 4096,
dropout: Optional[float] = 0.0,
model_seed: Optional[int] = None,
**kwargs,
):
test_batch_size = test_batch_size or 4096
self.build_model = partial(
NN,
input_size=input_size,
num_tasks=1,
batch_size=test_batch_size,
dropout=dropout,
uncertainty="mve",
model_seed=model_seed,
)
self.model = self.build_model()
super().__init__(test_batch_size=test_batch_size, **kwargs)
@property
def type_(self):
return "nn"
@property
def provides(self):
return {"means", "vars"}
def train(
self,
xs: Iterable[T],
ys: Sequence[Optional[float]],
*,
featurizer: Featurizer,
retrain: bool = False,
) -> bool:
if retrain:
self.model = self.build_model()
return self.model.train(xs, ys, featurizer)
def get_means(self, xs: Sequence) -> np.ndarray:
preds = self.model.predict(xs)
return preds[:, 0]
def get_means_and_vars(self, xs: Sequence) -> Tuple[ndarray, ndarray]:
preds = self.model.predict(xs)
return preds[:, 0], self._safe_softplus(preds[:, 1])
def save(self, path) -> str:
return self.model.save(path)
def load(self, path):
self.model.load(path)
@classmethod
def _safe_softplus(cls, xs):
in_range = xs < 100
return np.log(1 + np.exp(xs * in_range)) * in_range + xs * (1 - in_range)
class NNDropoutModel(Model):
"""Feed forward neural network that uses MC dropout for UQ
Attributes
----------
model : NN
the underlying neural net on which to train and perform inference
dropout_size : int
        the number of forward passes to perform through the model at inference time
Parameters
----------
input_size : int
the size of the input dimension of the NN
    test_batch_size : Optional[int] (Default = 4096)
the size into which inputs should be batched
during training and inference
dropout : Optional[float] (Default = 0.0)
the dropout probability during training
dropout_size : int (Default = 10)
the number of passes to make through the network during inference
"""
def __init__(
self,
input_size: int,
test_batch_size: Optional[int] = 4096,
dropout: Optional[float] = 0.2,
dropout_size: int = 10,
model_seed: Optional[int] = None,
**kwargs,
):
test_batch_size = test_batch_size or 4096
self.build_model = partial(
NN,
input_size=input_size,
num_tasks=1,
batch_size=test_batch_size,
dropout=dropout,
uncertainty="dropout",
model_seed=model_seed,
)
self.model = self.build_model()
self.dropout_size = dropout_size
super().__init__(test_batch_size=test_batch_size, **kwargs)
@property
def type_(self):
return "nn"
@property
def provides(self):
return {"means", "vars", "stochastic"}
def train(
self,
xs: Iterable[T],
ys: Sequence[Optional[float]],
*,
featurizer: Featurizer,
retrain: bool = False,
) -> bool:
if retrain:
self.model = self.build_model()
return self.model.train(xs, ys, featurizer)
def get_means(self, xs: Sequence) -> ndarray:
predss = self._get_predss(xs)
return np.mean(predss, axis=1)
def get_means_and_vars(self, xs: Sequence) -> Tuple[ndarray, ndarray]:
predss = self._get_predss(xs)
return np.mean(predss, axis=1), np.var(predss, axis=1)
def _get_predss(self, xs: Sequence) -> ndarray:
"""Get the predictions for each dropout pass"""
predss = np.zeros((len(xs), self.dropout_size))
for j in tqdm(
range(self.dropout_size), leave=False, desc="bootstrap prediction", unit="pass"
):
predss[:, j] = self.model.predict(xs)[:, 0]
return predss
def save(self, path) -> str:
return self.model.save(path)
def load(self, path):
self.model.load(path)
| 29.058419 | 91 | 0.5965 |
f65ceb4e61c02d69cccbd2427328a854ad1544b7 | 359 | py | Python | BotUtils.py | cancinconntg/altermebot | 1e49d592e2977087719d59c50d7b6083bfe3522b | [
"Apache-2.0"
] | null | null | null | BotUtils.py | cancinconntg/altermebot | 1e49d592e2977087719d59c50d7b6083bfe3522b | [
"Apache-2.0"
] | null | null | null | BotUtils.py | cancinconntg/altermebot | 1e49d592e2977087719d59c50d7b6083bfe3522b | [
"Apache-2.0"
] | 1 | 2021-07-21T19:58:37.000Z | 2021-07-21T19:58:37.000Z | import re
ALIAS_MAX_LENGTH = 32
ALIAS_MIN_LENGTH = 2
ALIASES_MAX_COUNT = 10
ALIASING_ENABLED = 1
ALIASING_DISABLED = 0
HEALTH_SYSTEM_MESSAGING = 0
HEALTH_SYSTEM_MESSAGING_OK = 1
def escape_markdown(text):
"""Helper function to escape telegram markup symbols."""
escape_chars = '\*_`\['
return re.sub(r'([%s])' % escape_chars, r'\\\1', text)
| 18.894737 | 60 | 0.715877 |
3f47439f089d388a8d8baa08f3bb8831434aacf7 | 1,620 | py | Python | autonormalize/examples/script.py | j-grover/autonormalize | 764a652fadf1a7ec544c3447120d11aa29a11435 | [
"BSD-3-Clause"
] | 68 | 2019-08-14T22:01:23.000Z | 2021-03-10T21:33:36.000Z | autonormalize/examples/script.py | j-grover/autonormalize | 764a652fadf1a7ec544c3447120d11aa29a11435 | [
"BSD-3-Clause"
] | 26 | 2019-08-15T15:49:19.000Z | 2021-03-08T15:27:22.000Z | autonormalize/examples/script.py | j-grover/autonormalize | 764a652fadf1a7ec544c3447120d11aa29a11435 | [
"BSD-3-Clause"
] | 9 | 2019-10-18T00:49:23.000Z | 2021-04-14T05:42:50.000Z | import os
import time
import pandas as pd
import dfd
import user_interaction
path = os.getcwd()
df = pd.read_csv(os.path.join(path, 'example_3'))
df_3 = pd.read_csv(os.path.join(path, 'example_4'))
df_acc = pd.read_csv(os.path.join(path, 'accredation.csv'))
df_acc = df_acc.drop(columns=df_acc.columns[10:])
dic_2 = {
"id": [100, 101, 102, 103, 104, 105, 106, 107, 109],
"age": [1, 2, 3, 4, 5, 6, 7, 5, 6],
"height": [4, 5, 6, 7, 8, 9, 10, 8, 9],
"less_than_5": [1, 1, 1, 1, 0, 0, 0, 0, 0]
}
dic_1 = {
"id": [100, 101, 102, 103, 104, 105, 106, 107, 109],
"age": [1, 2, 3, 4, 5, 6, 7, 5, 6],
"height": [4, 5, 6, 7, 8, 9, 10, 8, 9]
}
df_1 = pd.DataFrame(dic_1)
df_2 = pd.DataFrame(dic_2)
def print_example(str_interp, df, dim=None):
print("\n\n")
print("dependencies for: \n" + str_interp)
if dim is not None:
print(str(dim[0]) + " rows and " + str(dim[1]) + " columns\n")
print("\n")
start_time = time.time()
dep = dfd.dfd(df)
end_time = time.time()
print(dep.serialize())
print("\nexecution time: " + str(end_time - start_time))
print("\n\n")
print_example(str(dic_1), df_1)
print_example(str(dic_2), df_2)
print_example("A = index, B = random, C = random, D = random, " +
"E = c != 1, F = b < 10, G = c + d", df, (100000, 7))
# print_example("see gen file", df_3, (400000, 12))
# print_example("see gen file", df_4, (400000, 14))
print(df_acc)
deps = user_interaction.find_dependencies(df_acc)
new_dfs = user_interaction.normalization(df_acc, deps)
print(deps)
for df in new_dfs:
print(df)
| 22.816901 | 73 | 0.592593 |
4d7674ee23517cd008b6ad1efde19930e34f462e | 551 | py | Python | news/migrations/0004_auto_20191001_1620.py | alex-muliande/tribune | 86316dd4b20a76320b4b20b86266f89aac02a326 | [
"MIT"
] | 1 | 2019-10-18T13:45:42.000Z | 2019-10-18T13:45:42.000Z | news/migrations/0004_auto_20191001_1620.py | alex-muliande/tribune | 86316dd4b20a76320b4b20b86266f89aac02a326 | [
"MIT"
] | 5 | 2020-02-12T03:14:22.000Z | 2021-09-08T01:19:18.000Z | news/migrations/0004_auto_20191001_1620.py | alex-muliande/tribune | 86316dd4b20a76320b4b20b86266f89aac02a326 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-01 13:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0003_article'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='editor',
),
migrations.RemoveField(
model_name='article',
name='tags',
),
migrations.DeleteModel(
name='Article',
),
]
| 20.407407 | 46 | 0.553539 |
55fe41af298a24f05bd1b18085d0fd4b972d243f | 5,074 | py | Python | exasol_integration_test_docker_environment/lib/docker/images/create/docker_image_create_task.py | exasol/integration-test-docker-environment | 35850f67cd4cde010f03dd556d1a0f74b3291eb8 | [
"MIT"
] | 4 | 2020-06-25T20:47:31.000Z | 2021-09-10T15:22:51.000Z | exasol_integration_test_docker_environment/lib/docker/images/create/docker_image_create_task.py | exasol/integration-test-docker-environment | 35850f67cd4cde010f03dd556d1a0f74b3291eb8 | [
"MIT"
] | 113 | 2020-06-02T08:51:08.000Z | 2022-03-31T08:47:41.000Z | exasol_integration_test_docker_environment/lib/docker/images/create/docker_image_create_task.py | exasol/integration-test-docker-environment | 35850f67cd4cde010f03dd556d1a0f74b3291eb8 | [
"MIT"
] | 2 | 2020-05-19T10:57:47.000Z | 2020-06-22T13:32:20.000Z | import copy
import importlib
import luigi
from exasol_integration_test_docker_environment.lib.base.docker_base_task import DockerBaseTask
from exasol_integration_test_docker_environment.lib.base.json_pickle_parameter import JsonPickleParameter
from exasol_integration_test_docker_environment.lib.docker.images.create.docker_image_build_task import \
DockerBuildImageTask
from exasol_integration_test_docker_environment.lib.docker.images.create.docker_image_load_task import \
DockerLoadImageTask
from exasol_integration_test_docker_environment.lib.docker.images.create.docker_image_pull_task import \
DockerPullImageTask
from exasol_integration_test_docker_environment.lib.docker.images.image_info import ImageInfo, ImageState
from exasol_integration_test_docker_environment.lib.docker.images.required_task_info import RequiredTaskInfoDict, \
RequiredTaskInfo
class DockerCreateImageTask(DockerBaseTask):
image_name = luigi.Parameter()
# ParameterVisibility needs to be hidden instead of private, because otherwise a MissingParameter gets thrown
image_info = JsonPickleParameter(ImageInfo,
visibility=luigi.parameter.ParameterVisibility.HIDDEN,
significant=True) # type: ImageInfo
def run_task(self):
new_image_info = yield from self.build(self.image_info)
self.return_object(new_image_info)
def build(self, image_info: ImageInfo):
if image_info.image_state == ImageState.NEEDS_TO_BE_BUILD.name:
task = self.create_child_task(DockerBuildImageTask,
image_name=self.image_name,
image_info=image_info)
yield from self.run_dependencies(task)
image_info.image_state = ImageState.WAS_BUILD.name # TODO clone and change
return image_info
elif image_info.image_state == ImageState.CAN_BE_LOADED.name:
task = self.create_child_task(DockerLoadImageTask,
image_name=self.image_name,
image_info=image_info)
yield from self.run_dependencies(task)
image_info.image_state = ImageState.WAS_LOADED.name
return image_info
elif image_info.image_state == ImageState.REMOTE_AVAILABLE.name:
task = self.create_child_task(DockerPullImageTask,
image_name=self.image_name,
image_info=image_info)
yield from self.run_dependencies(task)
image_info.image_state = ImageState.WAS_PULLED.name
return image_info
elif image_info.image_state == ImageState.TARGET_LOCALLY_AVAILABLE.name:
image_info.image_state = ImageState.USED_LOCAL.name
return image_info
elif image_info.image_state == ImageState.SOURCE_LOCALLY_AVAILABLE.name:
image_info.image_state = ImageState.WAS_TAGED.name
self.rename_source_image_to_target_image(image_info)
return image_info
else:
raise Exception("Task %s: Image state %s not supported for image %s",
self.task_id, image_info.image_state, image_info.get_target_complete_name())
def rename_source_image_to_target_image(self, image_info):
with self._get_docker_client() as docker_client:
docker_client.images.get(image_info.get_source_complete_name()).tag(
repository=image_info.target_repository_name,
tag=image_info.get_target_complete_tag()
)
class DockerCreateImageTaskWithDeps(DockerCreateImageTask):
# ParameterVisibility needs to be hidden instead of private, because otherwise a MissingParameter gets thrown
required_task_infos = JsonPickleParameter(RequiredTaskInfoDict,
visibility=luigi.parameter.ParameterVisibility.HIDDEN,
significant=True) # type: RequiredTaskInfoDict
def register_required(self):
self.required_tasks = {key: self.create_required_task(required_task_info)
for key, required_task_info
in self.required_task_infos.infos.items()}
self.futures = self.register_dependencies(self.required_tasks)
def create_required_task(self, required_task_info: RequiredTaskInfo) -> DockerCreateImageTask:
module = importlib.import_module(required_task_info.module_name)
class_ = getattr(module, required_task_info.class_name)
instance = self.create_child_task(class_, **required_task_info.params)
return instance
def run_task(self):
image_infos = self.get_values_from_futures(self.futures)
image_info = copy.copy(self.image_info)
image_info.depends_on_images = image_infos
new_image_info = yield from self.build(image_info)
self.return_object(new_image_info)
| 53.410526 | 115 | 0.692156 |
d69fad201e1996a93a0c07ab89caabf4c7815228 | 7,056 | py | Python | train.py | l-willis/dc_tts | 086877481157bf876ab1f8ec7b4384fd607c2bb6 | [
"Apache-2.0"
] | 1,167 | 2018-01-28T00:52:42.000Z | 2022-03-24T11:59:34.000Z | train.py | l-willis/dc_tts | 086877481157bf876ab1f8ec7b4384fd607c2bb6 | [
"Apache-2.0"
] | 94 | 2018-01-28T00:53:43.000Z | 2022-03-27T06:34:16.000Z | train.py | l-willis/dc_tts | 086877481157bf876ab1f8ec7b4384fd607c2bb6 | [
"Apache-2.0"
] | 387 | 2018-01-28T07:09:47.000Z | 2022-02-21T17:23:08.000Z | # -*- coding: utf-8 -*-
# /usr/bin/python2
'''
By kyubyong park. [email protected].
https://www.github.com/kyubyong/dc_tts
'''
from __future__ import print_function
from tqdm import tqdm
from data_load import get_batch, load_vocab
from hyperparams import Hyperparams as hp
from modules import *
from networks import TextEnc, AudioEnc, AudioDec, Attention, SSRN
import tensorflow as tf
from utils import *
import sys
class Graph:
def __init__(self, num=1, mode="train"):
'''
Args:
num: Either 1 or 2. 1 for Text2Mel 2 for SSRN.
mode: Either "train" or "synthesize".
'''
# Load vocabulary
self.char2idx, self.idx2char = load_vocab()
# Set flag
training = True if mode=="train" else False
# Graph
# Data Feeding
## L: Text. (B, N), int32
## mels: Reduced melspectrogram. (B, T/r, n_mels) float32
## mags: Magnitude. (B, T, n_fft//2+1) float32
if mode=="train":
self.L, self.mels, self.mags, self.fnames, self.num_batch = get_batch()
self.prev_max_attentions = tf.ones(shape=(hp.B,), dtype=tf.int32)
self.gts = tf.convert_to_tensor(guided_attention())
else: # Synthesize
self.L = tf.placeholder(tf.int32, shape=(None, None))
self.mels = tf.placeholder(tf.float32, shape=(None, None, hp.n_mels))
self.prev_max_attentions = tf.placeholder(tf.int32, shape=(None,))
if num==1 or (not training):
with tf.variable_scope("Text2Mel"):
# Get S or decoder inputs. (B, T//r, n_mels)
self.S = tf.concat((tf.zeros_like(self.mels[:, :1, :]), self.mels[:, :-1, :]), 1)
# Networks
with tf.variable_scope("TextEnc"):
self.K, self.V = TextEnc(self.L, training=training) # (N, Tx, e)
with tf.variable_scope("AudioEnc"):
self.Q = AudioEnc(self.S, training=training)
with tf.variable_scope("Attention"):
# R: (B, T/r, 2d)
# alignments: (B, N, T/r)
# max_attentions: (B,)
self.R, self.alignments, self.max_attentions = Attention(self.Q, self.K, self.V,
mononotic_attention=(not training),
prev_max_attentions=self.prev_max_attentions)
with tf.variable_scope("AudioDec"):
self.Y_logits, self.Y = AudioDec(self.R, training=training) # (B, T/r, n_mels)
else: # num==2 & training. Note that during training,
# the ground truth melspectrogram values are fed.
with tf.variable_scope("SSRN"):
self.Z_logits, self.Z = SSRN(self.mels, training=training)
if not training:
# During inference, the predicted melspectrogram values are fed.
with tf.variable_scope("SSRN"):
self.Z_logits, self.Z = SSRN(self.Y, training=training)
with tf.variable_scope("gs"):
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if training:
if num==1: # Text2Mel
# mel L1 loss
self.loss_mels = tf.reduce_mean(tf.abs(self.Y - self.mels))
# mel binary divergence loss
self.loss_bd1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.Y_logits, labels=self.mels))
# guided_attention loss
self.A = tf.pad(self.alignments, [(0, 0), (0, hp.max_N), (0, hp.max_T)], mode="CONSTANT", constant_values=-1.)[:, :hp.max_N, :hp.max_T]
self.attention_masks = tf.to_float(tf.not_equal(self.A, -1))
self.loss_att = tf.reduce_sum(tf.abs(self.A * self.gts) * self.attention_masks)
self.mask_sum = tf.reduce_sum(self.attention_masks)
self.loss_att /= self.mask_sum
# total loss
self.loss = self.loss_mels + self.loss_bd1 + self.loss_att
tf.summary.scalar('train/loss_mels', self.loss_mels)
tf.summary.scalar('train/loss_bd1', self.loss_bd1)
tf.summary.scalar('train/loss_att', self.loss_att)
tf.summary.image('train/mel_gt', tf.expand_dims(tf.transpose(self.mels[:1], [0, 2, 1]), -1))
tf.summary.image('train/mel_hat', tf.expand_dims(tf.transpose(self.Y[:1], [0, 2, 1]), -1))
else: # SSRN
# mag L1 loss
self.loss_mags = tf.reduce_mean(tf.abs(self.Z - self.mags))
# mag binary divergence loss
self.loss_bd2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.Z_logits, labels=self.mags))
# total loss
self.loss = self.loss_mags + self.loss_bd2
tf.summary.scalar('train/loss_mags', self.loss_mags)
tf.summary.scalar('train/loss_bd2', self.loss_bd2)
tf.summary.image('train/mag_gt', tf.expand_dims(tf.transpose(self.mags[:1], [0, 2, 1]), -1))
tf.summary.image('train/mag_hat', tf.expand_dims(tf.transpose(self.Z[:1], [0, 2, 1]), -1))
# Training Scheme
self.lr = learning_rate_decay(hp.lr, self.global_step)
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
tf.summary.scalar("lr", self.lr)
## gradient clipping
self.gvs = self.optimizer.compute_gradients(self.loss)
self.clipped = []
for grad, var in self.gvs:
grad = tf.clip_by_value(grad, -1., 1.)
self.clipped.append((grad, var))
self.train_op = self.optimizer.apply_gradients(self.clipped, global_step=self.global_step)
# Summary
self.merged = tf.summary.merge_all()
if __name__ == '__main__':
# argument: 1 or 2. 1 for Text2mel, 2 for SSRN.
num = int(sys.argv[1])
g = Graph(num=num); print("Training Graph loaded")
logdir = hp.logdir + "-" + str(num)
sv = tf.train.Supervisor(logdir=logdir, save_model_secs=0, global_step=g.global_step)
with sv.managed_session() as sess:
while 1:
for _ in tqdm(range(g.num_batch), total=g.num_batch, ncols=70, leave=False, unit='b'):
gs, _ = sess.run([g.global_step, g.train_op])
# Write checkpoint files at every 1k steps
if gs % 1000 == 0:
sv.saver.save(sess, logdir + '/model_gs_{}'.format(str(gs // 1000).zfill(3) + "k"))
if num==1:
# plot alignment
alignments = sess.run(g.alignments)
plot_alignment(alignments[0], str(gs // 1000).zfill(3) + "k", logdir)
# break
if gs > hp.num_iterations: break
print("Done")
| 43.288344 | 151 | 0.558957 |
de252525d4255b589ecc3a4a59ea424990fa18a4 | 2,283 | py | Python | service/googlecloudstorage.py | sesam-community/xml-translator | b77f18ca8b85012df21843543e386945f0824265 | [
"Apache-2.0"
] | null | null | null | service/googlecloudstorage.py | sesam-community/xml-translator | b77f18ca8b85012df21843543e386945f0824265 | [
"Apache-2.0"
] | 2 | 2019-12-26T17:05:03.000Z | 2020-01-06T19:13:43.000Z | service/googlecloudstorage.py | sesam-community/xml-translator | b77f18ca8b85012df21843543e386945f0824265 | [
"Apache-2.0"
] | 1 | 2019-11-12T09:29:11.000Z | 2019-11-12T09:29:11.000Z | from google.cloud import storage
import logging
from flask import abort
# class for reading files from a Google cloud storage
class GoogleCloudStorage:
"""
initiate class, the class is taking the path to the file storing the credentials for the Google cloud storage,
the credentials itself, and the name of the bucket where the xml files resides as parameters
"""
def __init__(self, credentialspath, credentials, bucketname):
# write the content of the credentials to the path specified by credentialspath
if credentials:
with open(credentialspath, "wb") as out_file:
out_file.write(credentials.encode())
self.bucket = bucketname
# method for retrieving a list of the files in the Google cloud storage bucket
def getlistofxmlfiles(self, path):
def generate():
# get all the blobs (files) in the bucket
blobs = bucket.list_blobs()
# loop through the blogs and only return those in the root folder of the bucket
for blob in blobs:
if blob.name.startswith(path) and not blob.name.endswith("/"): # subfolder object
yield blob.name
try:
# initiate Google cloud storage client
storage_client = storage.Client()
# get the bucket from the Google cloud storage
bucket = storage_client.get_bucket(self.bucket)
# return all the files in the bucket
return generate()
except Exception as e:
logging.error(str(e))
            # abort() expects an HTTP status code; report the failure as a 500
            abort(500, description=str(e))
# method for downloading the content of a file in the Google cloud storage bucket
def download(self, filename):
# initiate Google cloud storage client
storage_client = storage.Client()
# get the bucket from the Google cloud storage
bucket = storage_client.get_bucket(self.bucket)
try:
            # set chunk size (must be a multiple of 256 KB)
            chunk_size = 262144 * 4 * 10
            # get the blob from the bucket, downloading with the chunk size above
            blob = bucket.blob(filename, chunk_size=chunk_size)
return blob.download_as_string()
except Exception as e:
logging.error(str(e))
            # abort() expects an HTTP status code; report the failure as a 500
            abort(500, description=str(e))
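

# Illustrative usage sketch (not part of the original module); the credentials,
# bucket name and folder below are placeholders:
#
#   gcs = GoogleCloudStorage("/tmp/credentials.json", credentials_json, "my-bucket")
#   for name in gcs.getlistofxmlfiles("incoming/"):
#       xml_bytes = gcs.download(name)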
| 37.42623 | 115 | 0.631187 |
e8b5fdb0b1b58a4479f483c85f1113b570315da4 | 2,458 | py | Python | src/puremvc/patterns/mediator.py | scVENUS/puremvc-python-standard-framework | 942bec84220fbc601e8064104199881271ad54c9 | [
"BSD-3-Clause"
] | 12 | 2015-01-26T03:48:00.000Z | 2021-12-13T06:08:28.000Z | src/puremvc/patterns/mediator.py | scVENUS/puremvc-python-standard-framework | 942bec84220fbc601e8064104199881271ad54c9 | [
"BSD-3-Clause"
] | 4 | 2016-01-25T15:48:35.000Z | 2018-02-19T17:02:20.000Z | src/puremvc/patterns/mediator.py | scVENUS/puremvc-python-standard-framework | 942bec84220fbc601e8064104199881271ad54c9 | [
"BSD-3-Clause"
] | 12 | 2015-09-02T03:49:52.000Z | 2021-01-24T15:23:59.000Z | """
PureMVC Python Port by Toby de Havilland <[email protected]>
PureMVC - Copyright(c) 2006-08 Futurescale, Inc., Some rights reserved.
Your reuse is governed by the Creative Commons Attribution 3.0 License
"""
import puremvc.interfaces
import puremvc.patterns.observer
import puremvc.patterns.facade
class Mediator(puremvc.patterns.observer.Notifier, puremvc.interfaces.IMediator, puremvc.interfaces.INotifier):
"""
A base C{IMediator} implementation.
@see: L{View<org.puremvc.as3.core.view.View>}
"""
NAME = "Mediator"
def __init__(self, mediatorName=None, viewComponent=None):
"""
Mediator Constructor
Typically, a C{Mediator} will be written to serve
one specific control or group controls and so,
will not have a need to be dynamically named.
"""
self.facade = puremvc.patterns.facade.Facade.getInstance()
mediatorName = mediatorName or self.NAME
if mediatorName is None:
raise ValueError("Mediator name cannot be None")
self.mediatorName = mediatorName
self.setViewComponent(viewComponent)
def getMediatorName(self):
"""
Get the name of the C{Mediator}.
@return: the Mediator name
"""
return self.mediatorName
def setViewComponent(self,viewComponent):
"""
Set the C{IMediator}'s view component.
@param viewComponent: the view component
"""
self.viewComponent = viewComponent
def getViewComponent(self):
"""
Get the C{Mediator}'s view component.
@return: the view component
"""
return self.viewComponent
def listNotificationInterests(self):
"""
List the C{INotification} names this
C{Mediator} is interested in being notified of.
@return: List the list of C{INotification} names
"""
return []
def handleNotification(self,notification):
"""
Handle C{INotification}s.
Typically this will be handled in a if/else statement,
with one 'comparison' entry per C{INotification}
the C{Mediator} is interested in.
"""
pass
def onRegister(self):
"""
Called by the View when the Mediator is registered
"""
pass
def onRemove(self):
"""
Called by the View when the Mediator is removed
"""
pass
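

# Illustrative sketch of a concrete mediator (not part of the framework source);
# the notification name, view component and its update() method are hypothetical:
#
#   class MyPanelMediator(Mediator):
#       NAME = "MyPanelMediator"
#
#       def __init__(self, viewComponent=None):
#           super(MyPanelMediator, self).__init__(MyPanelMediator.NAME, viewComponent)
#
#       def listNotificationInterests(self):
#           return ["DATA_LOADED"]
#
#       def handleNotification(self, notification):
#           if notification.getName() == "DATA_LOADED":
#               self.viewComponent.update(notification.getBody())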
| 27.931818 | 111 | 0.633035 |
f5acc46b66e681a60e2ff23dd03f15da3b24a902 | 4,912 | py | Python | telestream_cloud_qc_sdk/telestream_cloud_qc/models/header_byte_count_test.py | pandastream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | null | null | null | telestream_cloud_qc_sdk/telestream_cloud_qc/models/header_byte_count_test.py | pandastream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | 2 | 2016-07-06T14:13:31.000Z | 2018-03-07T12:54:58.000Z | telestream_cloud_qc_sdk/telestream_cloud_qc/models/header_byte_count_test.py | Telestream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class HeaderByteCountTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'header_bytes': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'header_bytes': 'header_bytes',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, header_bytes=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""HeaderByteCountTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._header_bytes = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if header_bytes is not None:
self.header_bytes = header_bytes
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def header_bytes(self):
"""Gets the header_bytes of this HeaderByteCountTest. # noqa: E501
:return: The header_bytes of this HeaderByteCountTest. # noqa: E501
:rtype: int
"""
return self._header_bytes
@header_bytes.setter
def header_bytes(self, header_bytes):
"""Sets the header_bytes of this HeaderByteCountTest.
:param header_bytes: The header_bytes of this HeaderByteCountTest. # noqa: E501
:type: int
"""
self._header_bytes = header_bytes
@property
def reject_on_error(self):
"""Gets the reject_on_error of this HeaderByteCountTest. # noqa: E501
:return: The reject_on_error of this HeaderByteCountTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this HeaderByteCountTest.
:param reject_on_error: The reject_on_error of this HeaderByteCountTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this HeaderByteCountTest. # noqa: E501
:return: The checked of this HeaderByteCountTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this HeaderByteCountTest.
:param checked: The checked of this HeaderByteCountTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HeaderByteCountTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, HeaderByteCountTest):
return True
return self.to_dict() != other.to_dict()
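

# Illustrative usage (not part of the generated client); the field values are
# placeholders:
#
#   test = HeaderByteCountTest(header_bytes=1024, reject_on_error=True, checked=True)
#   test.to_dict()  # {'header_bytes': 1024, 'reject_on_error': True, 'checked': True}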
| 28.229885 | 123 | 0.599959 |
1d859ab31d8318df08dc7e7977cee8558ecd90a2 | 437 | py | Python | 2020/skel/main.py | MWers/advent-of-code | 15cc983d81e0154fd3926739c77b33dc1da911a4 | [
"MIT"
] | null | null | null | 2020/skel/main.py | MWers/advent-of-code | 15cc983d81e0154fd3926739c77b33dc1da911a4 | [
"MIT"
] | null | null | null | 2020/skel/main.py | MWers/advent-of-code | 15cc983d81e0154fd3926739c77b33dc1da911a4 | [
"MIT"
] | null | null | null | import argparse
from typing import List
parser = argparse.ArgumentParser(description='Run an Advent of Code program')
parser.add_argument(
'input_file', type=str, help='the file containing input data'
)
parser.add_argument(
'--debug', action='store_true', help='print debug messages'
)
args = parser.parse_args()
input_data: List = []
with open(args.input_file) as f:
    input_data = f.read().splitlines()
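

# Example use of the parsed flags (illustrative); puzzle-specific solution logic
# would go below this point.
if args.debug:
    print(f'Read {len(input_data)} input lines')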
| 25.705882 | 77 | 0.732265 |
a2a5779fc48816f5c96f4082c4484d18b5807dc6 | 1,305 | py | Python | Algorithms/1092/spfa.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/1092/spfa.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/1092/spfa.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | from typing import List
from queue import Queue
class Solution:
def shortestCommonSupersequence(self, str1: str, str2: str) -> str:
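        # BFS over (i1, i2) index pairs, SPFA-style: lenDic records the shortest
        # supersequence prefix length seen for each state, and a state is only
        # re-queued when a strictly shorter prefix reaches it. Once either string
        # is exhausted, the remainder of the other is appended and the shortest
        # completed candidate is kept in opt.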
opt = str1 + str2
lenDic = {(0, 0): 0}
q = Queue()
q.put(("", 0, 0))
while not q.empty():
now, i1, i2 = q.get()
if i1 >= len(str1) or i2 >= len(str2):
nOpt = now + str1[i1:] + str2[i2:]
if len(nOpt) < len(opt):
opt = nOpt
lenDic[(len(str1), len(str2))] = len(opt)
continue
if str1[i1] == str2[i2]:
nNow = now + str1[i1]
oLen = lenDic.get((i1+1, i2+1), len(opt))
if len(nNow) < oLen:
lenDic[(i1+1, i2+1)] = len(nNow)
q.put((nNow, i1+1, i2+1))
nNow = now + str1[i1]
oLen = lenDic.get((i1+1, i2), len(opt))
if len(nNow) < oLen:
lenDic[(i1+1, i2)] = len(nNow)
q.put((nNow, i1+1, i2))
nNow = now + str2[i2]
oLen = lenDic.get((i1, i2+1), len(opt))
if len(nNow) < oLen:
lenDic[(i1, i2+1)] = len(nNow)
q.put((nNow, i1, i2+1))
return opt | 32.625 | 71 | 0.413793 |
401254f5d4ef3eb9a58793100c573fbdc8ac8e4e | 19,289 | py | Python | train.py | liv0vil/Computer-Vision-FixMatch | 7614cde1b4be1d5461e3354faacad33a01f1a9b2 | [
"MIT"
] | 503 | 2020-01-30T02:48:16.000Z | 2022-03-30T13:32:46.000Z | train.py | liv0vil/Computer-Vision-FixMatch | 7614cde1b4be1d5461e3354faacad33a01f1a9b2 | [
"MIT"
] | 48 | 2020-02-28T09:38:19.000Z | 2022-02-09T11:07:05.000Z | train.py | liv0vil/Computer-Vision-FixMatch | 7614cde1b4be1d5461e3354faacad33a01f1a9b2 | [
"MIT"
] | 130 | 2020-01-31T08:49:14.000Z | 2022-03-29T07:58:10.000Z | import argparse
import logging
import math
import os
import random
import shutil
import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from dataset.cifar import DATASET_GETTERS
from utils import AverageMeter, accuracy
logger = logging.getLogger(__name__)
best_acc = 0
def save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint,
'model_best.pth.tar'))
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_cosine_schedule_with_warmup(optimizer,
num_warmup_steps,
num_training_steps,
num_cycles=7./16.,
last_epoch=-1):
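    # Linear warmup for num_warmup_steps, then a cosine decay of the LR multiplier:
    # cos(pi * num_cycles * progress). With the default num_cycles = 7/16 the
    # multiplier falls from 1.0 to about cos(7*pi/16) ~= 0.2 at the end of training,
    # and max(0., ...) keeps it non-negative.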
def _lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
no_progress = float(current_step - num_warmup_steps) / \
float(max(1, num_training_steps - num_warmup_steps))
return max(0., math.cos(math.pi * num_cycles * no_progress))
return LambdaLR(optimizer, _lr_lambda, last_epoch)
def interleave(x, size):
s = list(x.shape)
return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
def de_interleave(x, size):
s = list(x.shape)
return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
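

# Minimal sanity-check sketch for interleave/de_interleave (illustrative only; the
# tensor and sizes below are assumptions, not part of the training code).
# Interleaving mixes labeled and unlabeled samples into one batch so BatchNorm
# statistics are computed over the combined batch; de_interleave restores the
# original row order after the forward pass.
def _demo_interleave():
    x = torch.arange(6).reshape(6, 1)
    y = interleave(x, 3)                       # rows reordered to [0, 3, 1, 4, 2, 5]
    assert torch.equal(de_interleave(y, 3), x)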
def main():
parser = argparse.ArgumentParser(description='PyTorch FixMatch Training')
parser.add_argument('--gpu-id', default='0', type=int,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--num-workers', type=int, default=4,
help='number of workers')
parser.add_argument('--dataset', default='cifar10', type=str,
choices=['cifar10', 'cifar100'],
help='dataset name')
parser.add_argument('--num-labeled', type=int, default=4000,
help='number of labeled data')
parser.add_argument("--expand-labels", action="store_true",
help="expand labels to fit eval steps")
parser.add_argument('--arch', default='wideresnet', type=str,
choices=['wideresnet', 'resnext'],
help='dataset name')
parser.add_argument('--total-steps', default=2**20, type=int,
help='number of total steps to run')
parser.add_argument('--eval-step', default=1024, type=int,
help='number of eval steps to run')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch-size', default=64, type=int,
help='train batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
help='initial learning rate')
parser.add_argument('--warmup', default=0, type=float,
help='warmup epochs (unlabeled data based)')
parser.add_argument('--wdecay', default=5e-4, type=float,
help='weight decay')
parser.add_argument('--nesterov', action='store_true', default=True,
help='use nesterov momentum')
parser.add_argument('--use-ema', action='store_true', default=True,
help='use EMA model')
parser.add_argument('--ema-decay', default=0.999, type=float,
help='EMA decay rate')
parser.add_argument('--mu', default=7, type=int,
help='coefficient of unlabeled batch size')
parser.add_argument('--lambda-u', default=1, type=float,
help='coefficient of unlabeled loss')
parser.add_argument('--T', default=1, type=float,
help='pseudo label temperature')
parser.add_argument('--threshold', default=0.95, type=float,
help='pseudo label threshold')
parser.add_argument('--out', default='result',
help='directory to output the result')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--seed', default=None, type=int,
help="random seed")
parser.add_argument("--amp", action="store_true",
help="use 16-bit (mixed) precision through NVIDIA apex AMP")
parser.add_argument("--opt_level", type=str, default="O1",
help="apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--no-progress', action='store_true',
help="don't use progress bar")
args = parser.parse_args()
global best_acc
def create_model(args):
if args.arch == 'wideresnet':
import models.wideresnet as models
model = models.build_wideresnet(depth=args.model_depth,
widen_factor=args.model_width,
dropout=0,
num_classes=args.num_classes)
elif args.arch == 'resnext':
import models.resnext as models
model = models.build_resnext(cardinality=args.model_cardinality,
depth=args.model_depth,
width=args.model_width,
num_classes=args.num_classes)
logger.info("Total params: {:.2f}M".format(
sum(p.numel() for p in model.parameters())/1e6))
return model
if args.local_rank == -1:
device = torch.device('cuda', args.gpu_id)
args.world_size = 1
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.world_size = torch.distributed.get_world_size()
args.n_gpu = 1
args.device = device
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning(
f"Process rank: {args.local_rank}, "
f"device: {args.device}, "
f"n_gpu: {args.n_gpu}, "
f"distributed training: {bool(args.local_rank != -1)}, "
f"16-bits training: {args.amp}",)
logger.info(dict(args._get_kwargs()))
if args.seed is not None:
set_seed(args)
if args.local_rank in [-1, 0]:
os.makedirs(args.out, exist_ok=True)
args.writer = SummaryWriter(args.out)
if args.dataset == 'cifar10':
args.num_classes = 10
if args.arch == 'wideresnet':
args.model_depth = 28
args.model_width = 2
elif args.arch == 'resnext':
args.model_cardinality = 4
args.model_depth = 28
args.model_width = 4
elif args.dataset == 'cifar100':
args.num_classes = 100
if args.arch == 'wideresnet':
args.model_depth = 28
args.model_width = 8
elif args.arch == 'resnext':
args.model_cardinality = 8
args.model_depth = 29
args.model_width = 64
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
labeled_dataset, unlabeled_dataset, test_dataset = DATASET_GETTERS[args.dataset](
args, './data')
if args.local_rank == 0:
torch.distributed.barrier()
train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler
labeled_trainloader = DataLoader(
labeled_dataset,
sampler=train_sampler(labeled_dataset),
batch_size=args.batch_size,
num_workers=args.num_workers,
drop_last=True)
unlabeled_trainloader = DataLoader(
unlabeled_dataset,
sampler=train_sampler(unlabeled_dataset),
batch_size=args.batch_size*args.mu,
num_workers=args.num_workers,
drop_last=True)
test_loader = DataLoader(
test_dataset,
sampler=SequentialSampler(test_dataset),
batch_size=args.batch_size,
num_workers=args.num_workers)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
model = create_model(args)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
no_decay = ['bias', 'bn']
grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': args.wdecay},
{'params': [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optim.SGD(grouped_parameters, lr=args.lr,
momentum=0.9, nesterov=args.nesterov)
args.epochs = math.ceil(args.total_steps / args.eval_step)
scheduler = get_cosine_schedule_with_warmup(
optimizer, args.warmup, args.total_steps)
if args.use_ema:
from models.ema import ModelEMA
ema_model = ModelEMA(args, model, args.ema_decay)
args.start_epoch = 0
if args.resume:
logger.info("==> Resuming from checkpoint..")
assert os.path.isfile(
args.resume), "Error: no checkpoint directory found!"
args.out = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
if args.use_ema:
ema_model.ema.load_state_dict(checkpoint['ema_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
if args.amp:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.opt_level)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank],
output_device=args.local_rank, find_unused_parameters=True)
logger.info("***** Running training *****")
logger.info(f" Task = {args.dataset}@{args.num_labeled}")
logger.info(f" Num Epochs = {args.epochs}")
logger.info(f" Batch size per GPU = {args.batch_size}")
logger.info(
f" Total train batch size = {args.batch_size*args.world_size}")
logger.info(f" Total optimization steps = {args.total_steps}")
model.zero_grad()
train(args, labeled_trainloader, unlabeled_trainloader, test_loader,
model, optimizer, ema_model, scheduler)
def train(args, labeled_trainloader, unlabeled_trainloader, test_loader,
model, optimizer, ema_model, scheduler):
if args.amp:
from apex import amp
global best_acc
test_accs = []
end = time.time()
if args.world_size > 1:
labeled_epoch = 0
unlabeled_epoch = 0
labeled_trainloader.sampler.set_epoch(labeled_epoch)
unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)
labeled_iter = iter(labeled_trainloader)
unlabeled_iter = iter(unlabeled_trainloader)
model.train()
for epoch in range(args.start_epoch, args.epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_x = AverageMeter()
losses_u = AverageMeter()
mask_probs = AverageMeter()
if not args.no_progress:
p_bar = tqdm(range(args.eval_step),
disable=args.local_rank not in [-1, 0])
for batch_idx in range(args.eval_step):
            try:
                inputs_x, targets_x = next(labeled_iter)
            except StopIteration:
                # Labeled loader exhausted: bump its epoch (for distributed
                # samplers) and restart the iterator.
                if args.world_size > 1:
                    labeled_epoch += 1
                    labeled_trainloader.sampler.set_epoch(labeled_epoch)
                labeled_iter = iter(labeled_trainloader)
                inputs_x, targets_x = next(labeled_iter)
            try:
                (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
            except StopIteration:
                # Same for the unlabeled loader.
                if args.world_size > 1:
                    unlabeled_epoch += 1
                    unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)
                unlabeled_iter = iter(unlabeled_trainloader)
                (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
data_time.update(time.time() - end)
batch_size = inputs_x.shape[0]
inputs = interleave(
torch.cat((inputs_x, inputs_u_w, inputs_u_s)), 2*args.mu+1).to(args.device)
targets_x = targets_x.to(args.device)
logits = model(inputs)
logits = de_interleave(logits, 2*args.mu+1)
logits_x = logits[:batch_size]
logits_u_w, logits_u_s = logits[batch_size:].chunk(2)
del logits
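            # Supervised cross-entropy on the labeled batch, plus the FixMatch
            # unlabeled objective: hard pseudo-labels are taken from the
            # weakly-augmented views (softmax with temperature T), only predictions
            # whose confidence exceeds args.threshold are kept (mask), and
            # cross-entropy is applied to the strongly-augmented views against
            # those pseudo-labels.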
Lx = F.cross_entropy(logits_x, targets_x, reduction='mean')
pseudo_label = torch.softmax(logits_u_w.detach()/args.T, dim=-1)
max_probs, targets_u = torch.max(pseudo_label, dim=-1)
mask = max_probs.ge(args.threshold).float()
Lu = (F.cross_entropy(logits_u_s, targets_u,
reduction='none') * mask).mean()
loss = Lx + args.lambda_u * Lu
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
losses.update(loss.item())
losses_x.update(Lx.item())
losses_u.update(Lu.item())
optimizer.step()
scheduler.step()
if args.use_ema:
ema_model.update(model)
model.zero_grad()
batch_time.update(time.time() - end)
end = time.time()
mask_probs.update(mask.mean().item())
if not args.no_progress:
p_bar.set_description("Train Epoch: {epoch}/{epochs:4}. Iter: {batch:4}/{iter:4}. LR: {lr:.4f}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. Loss_x: {loss_x:.4f}. Loss_u: {loss_u:.4f}. Mask: {mask:.2f}. ".format(
epoch=epoch + 1,
epochs=args.epochs,
batch=batch_idx + 1,
iter=args.eval_step,
lr=scheduler.get_last_lr()[0],
data=data_time.avg,
bt=batch_time.avg,
loss=losses.avg,
loss_x=losses_x.avg,
loss_u=losses_u.avg,
mask=mask_probs.avg))
p_bar.update()
if not args.no_progress:
p_bar.close()
if args.use_ema:
test_model = ema_model.ema
else:
test_model = model
if args.local_rank in [-1, 0]:
test_loss, test_acc = test(args, test_loader, test_model, epoch)
args.writer.add_scalar('train/1.train_loss', losses.avg, epoch)
args.writer.add_scalar('train/2.train_loss_x', losses_x.avg, epoch)
args.writer.add_scalar('train/3.train_loss_u', losses_u.avg, epoch)
args.writer.add_scalar('train/4.mask', mask_probs.avg, epoch)
args.writer.add_scalar('test/1.test_acc', test_acc, epoch)
args.writer.add_scalar('test/2.test_loss', test_loss, epoch)
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
model_to_save = model.module if hasattr(model, "module") else model
if args.use_ema:
ema_to_save = ema_model.ema.module if hasattr(
ema_model.ema, "module") else ema_model.ema
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model_to_save.state_dict(),
'ema_state_dict': ema_to_save.state_dict() if args.use_ema else None,
'acc': test_acc,
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
}, is_best, args.out)
test_accs.append(test_acc)
logger.info('Best top-1 acc: {:.2f}'.format(best_acc))
logger.info('Mean top-1 acc: {:.2f}\n'.format(
np.mean(test_accs[-20:])))
if args.local_rank in [-1, 0]:
args.writer.close()
def test(args, test_loader, model, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
if not args.no_progress:
test_loader = tqdm(test_loader,
disable=args.local_rank not in [-1, 0])
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
data_time.update(time.time() - end)
model.eval()
inputs = inputs.to(args.device)
targets = targets.to(args.device)
outputs = model(inputs)
loss = F.cross_entropy(outputs, targets)
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.shape[0])
top1.update(prec1.item(), inputs.shape[0])
top5.update(prec5.item(), inputs.shape[0])
batch_time.update(time.time() - end)
end = time.time()
if not args.no_progress:
test_loader.set_description("Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. ".format(
batch=batch_idx + 1,
iter=len(test_loader),
data=data_time.avg,
bt=batch_time.avg,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
))
if not args.no_progress:
test_loader.close()
logger.info("top-1 acc: {:.2f}".format(top1.avg))
logger.info("top-5 acc: {:.2f}".format(top5.avg))
return losses.avg, top1.avg
if __name__ == '__main__':
main()
| 39.046559 | 238 | 0.581938 |
7a1314852587354c517d97d413c6b60b2a6101e2 | 37,260 | py | Python | ARTDeco/main.py | davebx/ARTDeco | 1b08583a2e23015f6bc444919b2e10d5fb1bcf19 | [
"MIT"
] | 1 | 2022-02-02T04:03:45.000Z | 2022-02-02T04:03:45.000Z | ARTDeco/main.py | davebx/ARTDeco | 1b08583a2e23015f6bc444919b2e10d5fb1bcf19 | [
"MIT"
] | 8 | 2019-04-29T18:33:38.000Z | 2022-03-16T12:17:52.000Z | ARTDeco/main.py | davebx/ARTDeco | 1b08583a2e23015f6bc444919b2e10d5fb1bcf19 | [
"MIT"
] | 4 | 2019-04-26T21:44:51.000Z | 2022-03-10T15:32:20.000Z | '''
Main script for running ARTDeco. Contains code to run each of the modes.
'''
from .misc import ARTDecoDir,infer_experiments_group,output_inferred_format,summarize_bam_files,get_regions_exp
from .preprocess import parse_gtf,create_stranded_downstream_df,create_stranded_read_in_df,\
create_unstranded_downstream_df,create_unstranded_read_in_df,make_multi_tag_dirs
from .readthrough import get_multi_gene_exp,get_max_isoform,get_gene_v_intergenic,deconvolute_exp,assign_genes,\
summarize_readthrough_stats,summarize_read_in_assignments
from .diff_exp_read_in import read_in_diff_exp,assign_read_in_genes,summarize_diff_exp_read_in_assignments
from .get_dogs import get_dog_screening,generate_screening_bed,get_multi_interval_coverage,generate_full_screening_bed,\
get_multi_dog_beds,merge_dogs,get_dog_exp,summarize_all_dogs
import argparse
import os
import sys
import pandas as pd
def main():
#Make command line interface.
parser = argparse.ArgumentParser(prog='ARTDeco',
description='Main script for Automatic ReadThrough DEteCtiOn-ARTDeco')
parser.add_argument('-mode',
help='Mode in which to run ARTDeco. Options are preprocess, readthrough, diff_exp_read_in, \
get_dogs, and diff_exp_dogs.',action='store')
parser.add_argument('-home-dir',help='Directory in which to run ARTDeco (default is current directory)',
action='store',default='.')
parser.add_argument('-bam-files-dir',
help='Directory in which the BAM files are located (default is current directory)',
action='store',default='.')
parser.add_argument('-layout',
help='Indicates whether the files are paired-end or single-end. Options are PE or SE.',
action='store')
parser.add_argument('-stranded',
help='Indicates whether the files are stranded or unstranded. Options are True or False.',
action='store')
parser.add_argument('-orientation',
help='Indicates whether the files are forward- or reverse-stranded. Options are Forward or '+
'Reverse. Required for stranded data',action='store')
parser.add_argument('-single',
help='Indicates whether you want to create tag directories with a single file (useful for new'+\
'assemblies with lots of scaffolds).',default=False,action='store_true')
parser.add_argument('-gtf-file',help='GTF file',action='store')
parser.add_argument('-cpu',help='Maximum CPUs to use',action='store',type=int,default=1)
parser.add_argument('-chrom-sizes-file',help='Chromosome sizes file')
parser.add_argument('-read-in-dist',help='Read-in distance. Default is 1 kb.',type=int,default=1000)
parser.add_argument('-readthrough-dist',help='Readthrough distance. Default is 10 kb.',type=int,default=10000)
parser.add_argument('-intergenic-min-len',help='Minimum length for intergenic regions. Default is 100 bp.',type=int,
default=100)
parser.add_argument('-intergenic-max-len',help='Maximum length for intergenic regions. Default is 15 kb.',type=int,
default=15000)
parser.add_argument('-read-in-threshold', help='Threshold for considering read-in gene. Default is -1.',type=float,
default=-1)
parser.add_argument('-read-in-fpkm',help='Minimum FPKM value for considering a gene. Default is 0.25 FPKM.',
type=float,default=0.25)
parser.add_argument('-summary-genes', help='Number of genes for summarizing readthrough levels. Default is 1000.',
type=int,default=1000)
parser.add_argument('-overwrite',help='Indicate whether to overwrite existing files',default=False,
action='store_true')
parser.add_argument('-meta-file',help='Meta file',action='store',type=str)
parser.add_argument('-comparisons-file',help='Comparisons file',type=str)
parser.add_argument('-log2FC', help='Minimum log2 fold change for considering a gene upregulated. Default is 2.',
type=float,default=2)
parser.add_argument('-pval', help='Maximum p-value for considering a gene upregulated. Default is 0.05.',type=float,
default=0.05)
parser.add_argument('-min-dog-len',help='Minimum DoG length. Default is 4 kb.',type=int,default=4000)
parser.add_argument('-dog-window',help='DoG window size. Default is 500 bp.',type=int,default=500)
parser.add_argument('-min-dog-coverage',help='Minimum FPKM for DoG discovery. Default is 0.15 FPKM.',type=float,
default=0.15)
parser.add_argument('-gene-types',help='Limit gene sets for reporting. Default is all gene types.',nargs='+',
action="store")
parser.add_argument('-skip-bam-summary',help='Skip summary of BAM files (useful for time saving).',default=False,
action='store_true')
args = parser.parse_known_args()[0]
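    #Example invocation (illustrative; file names and values are placeholders):
    #  ARTDeco -mode preprocess -gtf-file annotation.gtf \
    #      -chrom-sizes-file genome.chrom.sizes -layout PE -stranded True \
    #      -orientation Reverse -cpu 8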
if args.mode in ['preprocess','readthrough','get_dogs','diff_exp_read_in','diff_exp_dogs']:
print(f'Running {args.mode} mode...')
else:
print('No valid run mode specified... Will generate all files...')
args.mode = None
#Check if home and BAM file directories exist. If they do, load ARTDeco file structure.
if os.path.isdir(args.home_dir) and os.path.isdir(args.bam_files_dir):
print('Loading ARTDeco file structure...')
artdeco_dir = ARTDecoDir(args.bam_files_dir,args.home_dir)
elif not os.path.isdir(args.home_dir):
print('User-specified home directory does not exist... Exiting...')
sys.exit(1)
else:
print('User-specified BAM file directory does not exist... Exiting...')
sys.exit(1)
#Check for BAM files. If there are no BAM files, exit.
if len(artdeco_dir.bam_files) == 0:
print('No BAM files... Exiting...')
sys.exit(1)
#Overwrite specified.
if args.overwrite:
print('Overwrite specified... Will regenerate all files...')
#Create summary file directory if it does not exist.
if not os.path.isdir(artdeco_dir.summary_dir):
os.mkdir(artdeco_dir.summary_dir)
#Create preprocess_files directory if it does not exist.
if not os.path.isdir(artdeco_dir.preprocess_dir):
os.mkdir(artdeco_dir.preprocess_dir)
#Generate meta and comparisons if it is needed.
if (args.mode and (args.mode in ['diff_exp_read_in','diff_exp_dogs']) or
(args.mode == 'preprocess' and args.meta_file)) or (not args.mode and args.meta_file):
from .DESeq2 import reformat_meta,reformat_comparisons,generate_comparisons,load_deseq_dataset,run_deseq,\
deseq_results
#Specify whether meta file needs to be generated.
if args.overwrite:
new_meta = True
elif os.path.isfile(artdeco_dir.meta_file):
print('Reformatted meta file exists...')
new_meta = False
else:
new_meta = True
#Generate meta.
if new_meta:
#Check meta file.
if args.meta_file and os.path.isfile(args.meta_file):
meta = open(args.meta_file).readlines()
meta_format = True
i = 0
while i < len(meta) and meta_format:
line = meta[i].strip().split('\t')
#If the length of the split line is different than 2, the meta file isn't properly formatted.
if len(line) != 2:
meta_format = False
#Otherwise, ensure that the first line is the proper format.
else:
if i == 0:
if line[0] != 'Experiment' or line[1] != 'Group':
meta_format = False
i += 1
if meta_format:
print('Meta file properly formatted... Generating reformatted meta...')
reformat_meta(args.meta_file,artdeco_dir.preprocess_dir)
else:
print('Meta file not properly formatted... Exiting...')
sys.exit(1)
elif args.meta_file:
print('Meta file does not exist... Exiting...')
sys.exit(1)
else:
print('No meta file supplied... Exiting...')
sys.exit(1)
#Specify whether comparisons file needs to be generated.
if args.overwrite:
new_comparisons = True
elif new_meta:
new_comparisons = True
elif os.path.isfile(artdeco_dir.comparisons_file):
print('Reformatted comparisons file exists...')
new_comparisons = False
else:
new_comparisons = True
#Generate comparisons.
if new_comparisons:
#Grab groups.
groups = [line.strip().split('\t')[1] for line in open(artdeco_dir.meta_file).readlines()[1:]]
#Check comparisons file.
if args.comparisons_file and os.path.isfile(args.comparisons_file):
print('Comparisons file exists...')
#Check format.
comparisons = [line.strip().split('\t') for line in open(args.comparisons_file).readlines()]
comparisons_lens = [len(line) for line in comparisons]
#Check if lines are tab-separated formatted.
if len(set(comparisons_lens)) == 1 and len(comparisons[0]) == 2:
comparisons_format = True
for line in comparisons:
line[0] = line[0].replace('-','_').replace(' ','_')
line[1] = line[1].replace('-', '_').replace(' ', '_')
if not line[0] in groups or not line[1] in groups:
comparisons_format = False
else:
comparisons_format = False
#If the file is properly formatted, reformat it. Otherwise, generate an all-by-all file.
if comparisons_format:
print('Comparisons file properly formatted... Generating reformatted comparisons...')
reformat_comparisons(args.comparisons_file,artdeco_dir.preprocess_dir)
else:
print('Comparisons file not properly formatted... Generating all-by-all comparisons file...')
generate_comparisons(artdeco_dir.meta_file,artdeco_dir.preprocess_dir)
else:
print('Comparison file does not exist or not provided... Generating comparisons file...')
generate_comparisons(artdeco_dir.meta_file,artdeco_dir.preprocess_dir)
comparisons = [line.strip().split('\t') for line in open(artdeco_dir.comparisons_file).readlines()]
#Update files in ARTDeco directory.
if os.path.exists(artdeco_dir.meta_file):
artdeco_dir.set_diff_exp_output()
#Generate output files.
out_files = artdeco_dir.get_files(args.mode,os.path.exists(artdeco_dir.meta_file),args.overwrite)
if out_files:
print('ARTDeco will generate the following files:\n'+'\n'.join(out_files))
else:
print('All necessary files generated... Exiting...')
sys.exit(1)
#Update file structure.
artdeco_dir.update_dir_lists(out_files)
#Check if GTF file is needed for generating output.
if artdeco_dir.gtf_needed:
print('GTF file needed... Checking...')
if args.gtf_file and os.path.isfile(args.gtf_file):
print('GTF file exists...')
elif args.gtf_file:
print('User-supplied GTF file does not exist... Exiting...')
sys.exit(1)
else:
print('No GTF file supplied... Exiting...')
sys.exit(1)
#Check if BAM file formats are needed. If they are, check if the user has specified them. If the user has not,
#infer those formats. Summarize file if the format is needed.
if artdeco_dir.format_needed:
print('BAM file format needed... Checking... Will infer if not user-specified.')
if args.layout:
if str.lower(args.layout) in ['pe','se']:
infer_layout = False
if str.lower(args.layout) == 'pe':
print('BAM files specified as paired-end...')
pe = True
else:
print('BAM files specified as single-end...')
pe = False
else:
print('Improper layout specified... Will infer...')
infer_layout = True
else:
print('No layout specified... Will infer...')
infer_layout = True
if args.stranded:
if str.lower(args.stranded) in ['true','false']:
infer_strandedness = False
if str.lower(args.stranded) == 'true':
print('BAM files specified as stranded...')
stranded = True
else:
print('BAM files specified as unstranded...')
stranded = False
else:
print('Improper indication of strandedness...')
infer_strandedness = True
elif args.orientation and str.lower(args.orientation) in ['forward','reverse']:
print('No strandedness specified but strand orientation specified... Will assign data as stranded...')
infer_strandedness = False
stranded = True
else:
print('No strandedness specified... Will infer...')
infer_strandedness = True
if args.orientation:
if str.lower(args.orientation) in ['forward','reverse']:
infer_orientation = False
if str.lower(args.orientation) == 'forward':
print('BAM files specified as forward-strand oriented...')
flip = False
else:
print('BAM files specified as reverse-strand oriented...')
flip = True
else:
print('Improper strand orientation specified... Will infer...')
infer_orientation = True
elif not infer_strandedness:
if stranded:
print('No strand orientation specified... Data is stranded... Will infer orientation...')
infer_orientation = True
else:
print('No strand orientation specified... Data is unstranded... No need to infer orientation...')
infer_orientation = False
flip = False
else:
print('No strand orientation specified... Will infer...')
infer_orientation = True
#Infer layout if necessary.
if infer_layout or infer_strandedness or infer_orientation:
print('Will infer BAM formats...')
#Check if full genes BED exists. If it doesn't, regenerate it.
if os.path.isfile(artdeco_dir.genes_full) and not args.overwrite:
print('Full genes BED file exists...')
else:
print('Generating full genes BED file...')
parse_gtf(args.gtf_file,args.home_dir)
out_files -= {artdeco_dir.genes_full,artdeco_dir.genes_condensed,artdeco_dir.gene_to_transcript,
artdeco_dir.gene_types}
artdeco_dir.update_dir_lists(out_files)
#Check file formats.
print('Inferring BAM file formats...')
formats = infer_experiments_group(artdeco_dir.bam_files,artdeco_dir.genes_full,
min(args.cpu,len(artdeco_dir.bam_files)))
#Check layout.
if infer_layout:
if len(set(x[1] for x in formats)) == 1:
pe = formats[0][1]
if pe:
print('All BAM files inferred as Paired-End...')
else:
print('All BAM files inferred as Single-End...')
else:
print('Error... One or more files do not match in inferred format... Exiting...')
for f in formats:
out_str = f'BAM file {f[0]} inferred as '+output_inferred_format(f)
print(out_str)
sys.exit(1)
#Check strandedness.
if infer_strandedness:
if len(set(x[2] for x in formats)) == 1:
stranded = formats[0][2]
if stranded:
print('All BAM files inferred as strand-specific...')
else:
print('All BAM files inferred as single-stranded...')
else:
print('Error... One or more files do not match in inferred format... Exiting...')
for f in formats:
out_str = f'BAM file {f[0]} inferred as '+output_inferred_format(f)
print(out_str)
sys.exit(1)
#Check strand orientation.
if infer_orientation:
if len(set(x[3] for x in formats)) == 1:
flip = formats[0][3]
if flip:
print('All BAM files inferred as reverse-strand oriented...')
else:
print('All BAM files inferred as forward-strand oriented...')
else:
print('Error... One or more files do not match in inferred format... Exiting...')
for f in formats:
out_str = f'BAM file {f[0]} inferred as '+output_inferred_format(f)
print(out_str)
sys.exit(1)
#Summarize files.
if args.skip_bam_summary:
print('Skipping summary of BAM file stats...')
else:
print('Summarizing BAM file stats...')
summary_file = os.path.join(artdeco_dir.summary_dir,'bam_summary.txt')
if os.path.isfile(summary_file):
os.remove(summary_file)
summary = summarize_bam_files(artdeco_dir.bam_files,args.cpu,pe,stranded,flip)
for line in summary.split('\n'):
print(line)
with open(summary_file,'w') as f:
f.write(summary)
#Load chromosome sizes file if needed.
if artdeco_dir.chrom_sizes_needed:
if args.chrom_sizes_file and os.path.isfile(args.chrom_sizes_file):
chrom_sizes_file = open(args.chrom_sizes_file)
chrom_sizes = {}
for line in chrom_sizes_file.readlines():
line = line.strip().split('\t')
chrom_sizes[line[0]] = int(line[1])
elif args.chrom_sizes_file:
print('Chromosome sizes file does not exist... Exiting...')
sys.exit(1)
else:
print('No chromosome sizes file supplied... Exiting...')
sys.exit(1)
#Generate preprocessing files.
if artdeco_dir.preprocessing_files:
if not os.path.isdir(artdeco_dir.preprocess_dir):
os.mkdir(artdeco_dir.preprocess_dir)
if artdeco_dir.preprocessing_files & {artdeco_dir.genes_condensed,artdeco_dir.genes_full,
artdeco_dir.gene_to_transcript,artdeco_dir.gene_types}:
parse_gtf(args.gtf_file,args.home_dir)
#Load genes.
genes = pd.read_csv(artdeco_dir.genes_condensed,sep='\t',header=None,
names=['Chrom','Start','Stop','Name','Score','Strand'])
del genes['Score']
genes = genes[['Name','Chrom','Strand','Start','Stop']]
#Create read-in BED file.
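        #Read-in regions sit immediately upstream of each gene's start (within
        #-read-in-dist), where signal indicates transcription running into the gene
        #from an upstream neighbour; readthrough regions (created below) sit
        #downstream of the gene end, out to -readthrough-dist.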
if artdeco_dir.read_in_bed in artdeco_dir.preprocessing_files:
print('Generating read-in region BED file...')
if stranded:
read_in_df = create_stranded_read_in_df(genes,chrom_sizes,max_len=args.intergenic_max_len,
min_len=args.intergenic_min_len,upstream_dist=args.read_in_dist)
else:
read_in_df = create_unstranded_read_in_df(genes,chrom_sizes,max_len=args.intergenic_max_len,
min_len=args.intergenic_min_len,
upstream_dist=args.read_in_dist)
#Format and create BED file.
read_in_df['Score'] = 0
read_in_df = read_in_df[['Chrom','Start','Stop','Name','Score','Strand']]
read_in_df['Start'] = read_in_df['Start'].astype(int)
read_in_df['Stop'] = read_in_df['Stop'].astype(int)
#Output BED file.
read_in_df.to_csv(artdeco_dir.read_in_bed,sep='\t',header=False,index=False)
#Create readthrough BED file.
if artdeco_dir.readthrough_bed in artdeco_dir.preprocessing_files:
print('Generating readthrough region BED file...')
if stranded:
readthrough_df = create_stranded_downstream_df(genes,chrom_sizes,max_len=args.intergenic_max_len,
min_len=args.intergenic_min_len,
downstream_dist=args.readthrough_dist)
else:
readthrough_df = create_unstranded_downstream_df(genes,chrom_sizes,max_len=args.intergenic_max_len,
min_len=args.intergenic_min_len,
downstream_dist=args.readthrough_dist)
readthrough_df['Score'] = 0
readthrough_df = readthrough_df[['Chrom','Start','Stop','Name','Score','Strand']]
readthrough_df['Start'] = readthrough_df['Start'].astype(int)
readthrough_df['Stop'] = readthrough_df['Stop'].astype(int)
readthrough_df.to_csv(artdeco_dir.readthrough_bed,sep='\t',header=False,index=False)
#Creating tag directories.
if artdeco_dir.tag_dirs:
print('Creating tag directories...')
make_multi_tag_dirs([artdeco_dir.tag_dir_to_bam[tag_dir] for tag_dir in artdeco_dir.tag_dirs],
artdeco_dir.preprocess_dir,flip,pe,stranded,args.single,min(len(artdeco_dir.tag_dirs),args.cpu))
#Generate quantification files.
if artdeco_dir.quantification_files:
if not os.path.isdir(artdeco_dir.quantification_dir):
print('Creating quantification directory...')
os.mkdir(artdeco_dir.quantification_dir)
if artdeco_dir.quantification_files & {artdeco_dir.gene_fpkm,artdeco_dir.gene_raw}:
print('Generating gene expression files...')
get_multi_gene_exp(artdeco_dir.all_tag_dirs,args.gtf_file,stranded,artdeco_dir.quantification_dir,args.cpu)
if artdeco_dir.max_isoform in artdeco_dir.quantification_files:
print('Getting maximum isoform...')
get_max_isoform(artdeco_dir.gene_fpkm,artdeco_dir.gene_to_transcript,artdeco_dir.quantification_dir)
if artdeco_dir.read_in_exp in artdeco_dir.quantification_files:
print('Generating read-in expression file...')
get_regions_exp((artdeco_dir.all_tag_dirs,artdeco_dir.read_in_bed,stranded,'-raw',
artdeco_dir.quantification_dir,min(len(artdeco_dir.all_tag_dirs),args.cpu)))
if artdeco_dir.readthrough_exp in artdeco_dir.quantification_files:
print('Generating readthrough expression file...')
get_regions_exp((artdeco_dir.all_tag_dirs, artdeco_dir.readthrough_bed,stranded,'-raw',
artdeco_dir.quantification_dir,min(len(artdeco_dir.all_tag_dirs),args.cpu)))
#Generate readthrough files.
if artdeco_dir.readthrough_files:
if not os.path.isdir(artdeco_dir.readthrough_dir):
print('Creating readthrough directory...')
os.mkdir(artdeco_dir.readthrough_dir)
if artdeco_dir.read_in_levels in artdeco_dir.readthrough_files:
print('Generate read-in vs. expression file...')
get_gene_v_intergenic(artdeco_dir.gene_raw,artdeco_dir.gene_fpkm,artdeco_dir.max_isoform,
artdeco_dir.read_in_exp,'Read-In',artdeco_dir.read_in_levels)
if artdeco_dir.corrected_exp in artdeco_dir.readthrough_files:
print('Correcting gene expression using read-in information...')
deconvolute_exp(artdeco_dir.read_in_levels,artdeco_dir.corrected_exp)
if artdeco_dir.readthrough_levels in artdeco_dir.readthrough_files:
print('Generate readthrough vs. expression file...')
get_gene_v_intergenic(artdeco_dir.gene_raw,artdeco_dir.gene_fpkm,artdeco_dir.max_isoform,
artdeco_dir.readthrough_exp,'Readthrough',artdeco_dir.readthrough_levels)
if artdeco_dir.read_in_assignments in artdeco_dir.readthrough_files:
print(f'Read-in genes assigned with read-in level threshold is {args.read_in_threshold} and read-in FPKM '+\
f'threshold is {args.read_in_fpkm}...')
if args.gene_types and len(open(artdeco_dir.gene_types).readlines()) > 1:
print('Using the following gene types: ' + ', '.join(args.gene_types) + '...')
else:
args.gene_types = None
print('Using all genes...')
assign_genes(artdeco_dir.read_in_levels,args.read_in_threshold,args.read_in_fpkm,
artdeco_dir.read_in_assignments,artdeco_dir.gene_types,args.gene_types)
#Summarize output.
print('Summarizing readthrough output...')
if args.gene_types and len(open(artdeco_dir.gene_types).readlines()) > 1:
print('Using the following gene types: '+', '.join(args.gene_types)+'...')
else:
args.gene_types = None
print('Using all genes...')
summary_file = os.path.join(artdeco_dir.summary_dir,'readthrough_summary.txt')
if os.path.isfile(summary_file):
os.remove(summary_file)
expts = []
for f in os.listdir(artdeco_dir.bam_files_dir):
if f[-4:] == '.bam':
expt = f[:-4].replace('-', '_').replace(' ', '_')
expts.append(expt)
summary = summarize_readthrough_stats(artdeco_dir.read_in_levels,expts,'Read-In',
args.summary_genes,artdeco_dir.gene_types,args.gene_types)
if os.path.isfile(artdeco_dir.readthrough_levels):
summary += '\n'+summarize_readthrough_stats(artdeco_dir.readthrough_levels,expts,'Readthrough',
args.summary_genes,artdeco_dir.gene_types,args.gene_types)
summary += '\n'+summarize_read_in_assignments(artdeco_dir.read_in_assignments,expts,args.read_in_threshold,
args.read_in_fpkm)
for line in summary.split('\n'):
print(line)
with open(summary_file,'w') as f:
f.write(summary)
#Generate DoG files.
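    #DoGs (downstream-of-gene transcripts) are regions of continued transcription
    #past a gene's annotated end. Discovery below screens coverage downstream of
    #each gene in -dog-window sized steps, requiring at least -min-dog-len of
    #contiguous windows above -min-dog-coverage, then extends each DoG for as long
    #as coverage persists.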
if artdeco_dir.dogs_files:
if not os.path.isdir(artdeco_dir.dogs_dir):
print('Creating DoG output directory...')
os.mkdir(artdeco_dir.dogs_dir)
if artdeco_dir.dogs_beds:
print(f'Finding DoGs...\nGet genes with potential DoGs with minimum length of {args.min_dog_len} bp, a '+
f'minimum coverage of {args.min_dog_coverage} FPKM, and screening window of {args.dog_window} bp...')
screening_genes = get_dog_screening(artdeco_dir.genes_condensed,args.min_dog_len)
print(f'Generate initial screening BED file for DoGs with minimum length {args.min_dog_len} bp and window'+
f' size {args.dog_window} bp...')
generate_screening_bed(screening_genes,args.min_dog_len,args.dog_window,args.home_dir)
print(f'Initial screening coverage for DoGs with minimum length of {args.min_dog_len} bp...')
#Screen for coverage threshold.
dogs_tag_dirs = [artdeco_dir.dogs_bed_to_tagdir[dogs_bed] for dogs_bed in artdeco_dir.dogs_beds]
screening_coverage_dfs = get_multi_interval_coverage(dogs_tag_dirs,args.home_dir,
[os.path.join(args.home_dir,'intervals.bed')],
args.min_dog_coverage,stranded,
min(args.cpu,len(dogs_tag_dirs)))
#Find genes that pass threshold for minimum length.
screening_genes_dfs = {}
for i in range(len(screening_coverage_dfs)):
new_df = screening_coverage_dfs[i].copy()
new_df = new_df.groupby('Name').count()
new_df = new_df[new_df.ID == args.min_dog_len/args.dog_window]
screening_genes_dfs[dogs_tag_dirs[i]] = screening_genes[screening_genes.Name.isin(new_df.index)].copy()
#Remove screening BED files.
os.remove(os.path.join(args.home_dir,'intervals.bed'))
print('Generate screening BED file for pre-screened DoGs...')
generate_full_screening_bed(dogs_tag_dirs,artdeco_dir.genes_condensed,screening_genes_dfs,
artdeco_dir.read_in_assignments,args.chrom_sizes_file,args.dog_window,args.cpu,
args.home_dir)
print('Screening coverage for pre-screened DoGs...')
#Screen for coverage threshold.
screening_coverage_dfs = get_multi_interval_coverage(dogs_tag_dirs,args.home_dir,
[os.path.join(args.home_dir,
tag_dir.split('/')[-1]+'.bed') for
tag_dir in dogs_tag_dirs],args.min_dog_coverage,
stranded,min(args.cpu,len(dogs_tag_dirs)))
#Remove screening BED files.
for tag_dir in dogs_tag_dirs:
expt_name = tag_dir.split('/')[-1]
os.remove(os.path.join(args.home_dir,f'{expt_name}.bed'))
print('Discovering DoG coordinates for pre-screened DoGs and output BED files...')
get_multi_dog_beds(screening_genes,screening_coverage_dfs,args.dog_window,args.cpu,dogs_tag_dirs,
artdeco_dir.dogs_dir)
if artdeco_dir.all_dogs_bed in artdeco_dir.dogs_files or artdeco_dir.dogs_beds:
print('Merge DoGs into a single annotation...')
merge_dogs(artdeco_dir.all_dogs_beds,artdeco_dir.dogs_dir)
if artdeco_dir.dogs_raw & artdeco_dir.dogs_files or artdeco_dir.dogs_fpkm & artdeco_dir.dogs_files:
print('Generating expression data for DoGs in individual experiments...')
get_dog_exp(artdeco_dir.all_tag_dirs,artdeco_dir.all_dogs_beds,stranded,artdeco_dir.dogs_dir,args.cpu)
if artdeco_dir.all_dogs_fpkm in artdeco_dir.dogs_files or artdeco_dir.dogs_beds:
print('Generating expression data in FPKM for all DoGs...')
get_regions_exp((artdeco_dir.all_tag_dirs,artdeco_dir.all_dogs_bed,stranded,'-fpkm',artdeco_dir.dogs_dir,
min(args.cpu,len(artdeco_dir.all_tag_dirs))))
if artdeco_dir.all_dogs_raw in artdeco_dir.dogs_files or artdeco_dir.dogs_beds:
print('Generating raw expression data for all DoGs...')
get_regions_exp((artdeco_dir.all_tag_dirs,artdeco_dir.all_dogs_bed,stranded,'-raw',artdeco_dir.dogs_dir,
min(args.cpu,len(artdeco_dir.all_tag_dirs))))
#Summarize DoG files.
summary_file = os.path.join(artdeco_dir.summary_dir,'dogs_summary.txt')
if os.path.isfile(summary_file):
os.remove(summary_file)
summary = summarize_all_dogs(artdeco_dir.all_dogs_bed,artdeco_dir.all_dogs_beds,artdeco_dir.all_dogs_fpkm,
artdeco_dir.all_dogs_fpkm_expts,args.min_dog_len,args.min_dog_coverage,
args.dog_window)
for line in summary.split('\n'):
print(line)
with open(summary_file,'w') as f:
f.write(summary)
#Generate differential expression output.
try:
if artdeco_dir.diff_exp_files:
if not os.path.isdir(artdeco_dir.diff_exp_dir):
print('Creating differential expression output directory...')
os.mkdir(artdeco_dir.diff_exp_dir)
print('Running DESeq2 on gene expression data...')
dds = load_deseq_dataset(artdeco_dir.gene_raw,artdeco_dir.meta_file)
dds_results = run_deseq(dds)
#Output results.
print('Output DESeq2 results...')
for condition1, condition2 in comparisons:
deseq_results(dds_results,condition1,condition2,artdeco_dir.diff_exp_dir)
    except Exception:
        #Differential expression is skipped silently when its inputs are unavailable
        #(e.g. no meta/comparisons file was provided).
        pass
#Generate differential expression with read-in information.
try:
if artdeco_dir.diff_exp_read_in_files:
if not os.path.isdir(artdeco_dir.diff_exp_read_in_dir):
print('Creating differential expression with read-in information directory...')
os.mkdir(artdeco_dir.diff_exp_read_in_dir)
#Join differential expression information to read-in information. Use this to infer read-in genes.
print('Combining differential expression results and read-in information... Inferring read-in genes for '+\
f'upregulated genes with log2 fold change > {args.log2FC}, p-value < {args.pval}, and FPKM > '+\
f'{args.read_in_fpkm}... Read-in level threshold is {args.read_in_threshold}...')
if args.gene_types and len(open(artdeco_dir.gene_types).readlines()) > 1:
print('Using the following gene types: ' + ', '.join(args.gene_types) + '...')
else:
args.gene_types = None
print('Using all genes...')
for condition1,condition2 in comparisons:
read_in_diff_exp(artdeco_dir.read_in_levels,artdeco_dir.meta_file,
os.path.join(artdeco_dir.diff_exp_dir,f'{condition1}-{condition2}-results.txt'),
artdeco_dir.diff_exp_read_in_dir)
assign_read_in_genes(os.path.join(artdeco_dir.diff_exp_read_in_dir,
f'{condition1}-{condition2}-read_in.txt'),args.log2FC,args.pval,
args.read_in_fpkm,args.read_in_threshold,artdeco_dir.gene_types,args.gene_types,
artdeco_dir.diff_exp_read_in_dir)
#Summarize output.
print('Summarize read-in gene inference with differential expression information...')
summary_file = os.path.join(artdeco_dir.summary_dir,'diff_exp_read_in_summary.txt')
if os.path.isfile(summary_file):
os.remove(summary_file)
assignment_files = []
for condition1,condition2 in comparisons:
assignment_files.append(os.path.join(artdeco_dir.diff_exp_read_in_dir,
f'{condition1}-{condition2}-read_in_assignment.txt'))
summary = summarize_diff_exp_read_in_assignments(assignment_files,args.log2FC,args.pval,args.read_in_fpkm,
args.read_in_threshold)
for line in summary.split('\n'):
print(line)
with open(summary_file,'w') as f:
f.write(summary)
    except Exception:
        #Skipped silently when read-in or differential expression inputs are unavailable.
        pass
#Generate differential expression output for DoGs.
try:
if artdeco_dir.diff_exp_dogs_files:
if not os.path.isdir(artdeco_dir.diff_exp_dogs_dir):
print('Creating differential expression for DoGs directory...')
os.mkdir(artdeco_dir.diff_exp_dogs_dir)
print('Running DESeq2 on DoGs...')
dds = load_deseq_dataset(artdeco_dir.all_dogs_raw,artdeco_dir.meta_file)
dds_results = run_deseq(dds)
#Output results.
print('Output DESeq2 results...')
for condition1,condition2 in comparisons:
deseq_results(dds_results,condition1,condition2,artdeco_dir.diff_exp_dogs_dir)
    except Exception:
pass
if __name__ == '__main__':
main() | 50.351351 | 124 | 0.600134 |
de55d4438511c2128b11627b1d5374a495e0798b | 735 | py | Python | garagem/migrations/0001_initial.py | araujo88/minhaGaragem | 31fb16a686eef2caa26e194c03a0528e43867188 | [
"MIT"
] | null | null | null | garagem/migrations/0001_initial.py | araujo88/minhaGaragem | 31fb16a686eef2caa26e194c03a0528e43867188 | [
"MIT"
] | null | null | null | garagem/migrations/0001_initial.py | araujo88/minhaGaragem | 31fb16a686eef2caa26e194c03a0528e43867188 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-16 04:28
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Veiculo',
fields=[
('tipo', models.CharField(choices=[('carro', 'Carro'), ('moto', 'Moto')], max_length=5)),
('cor', models.CharField(max_length=50)),
('modelo', models.CharField(max_length=50)),
                ('ano', models.IntegerField()),  # max_length is not valid on IntegerField
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
],
),
]
| 28.269231 | 125 | 0.567347 |
078566f13ce531287701b07b35096a1713e3fe65 | 6,576 | py | Python | 3.3.2/gdal-utils/osgeo_utils/gdal_fillnodata.py | benjamintd/pygdal | 1cd86a248f944e2ab28a818c3648b7d7c065e252 | [
"MIT"
] | 1 | 2020-11-13T09:22:12.000Z | 2020-11-13T09:22:12.000Z | flusstools/osgeo_utils/gdal_fillnodata.py | Ecohydraulics/flusstools | ab356788846dee089af146e924822dfafd096828 | [
"BSD-3-Clause"
] | null | null | null | flusstools/osgeo_utils/gdal_fillnodata.py | Ecohydraulics/flusstools | ab356788846dee089af146e924822dfafd096828 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# ******************************************************************************
# $Id$
#
# Project: GDAL Python Interface
# Purpose: Application for filling nodata areas in a raster by interpolation
# Author: Frank Warmerdam, [email protected]
#
# ******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2011, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import gdal
def CopyBand(srcband, dstband):
for line in range(srcband.YSize):
line_data = srcband.ReadRaster(0, line, srcband.XSize, 1)
dstband.WriteRaster(0, line, srcband.XSize, 1, line_data,
buf_type=srcband.DataType)
def Usage():
print("""gdal_fillnodata [-q] [-md max_distance] [-si smooth_iterations]
[-o name=value] [-b band]
srcfile [-nomask] [-mask filename] [-of format] [-co name=value]* [dstfile]""")
return 1
def main(argv):
max_distance = 100
smoothing_iterations = 0
options = []
quiet_flag = 0
src_filename = None
src_band = 1
dst_filename = None
frmt = 'GTiff'
creation_options = []
mask = 'default'
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return 0
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-of' or arg == '-f':
i = i + 1
frmt = argv[i]
elif arg == '-co':
i = i + 1
creation_options.append(argv[i])
elif arg == '-q' or arg == '-quiet':
quiet_flag = 1
elif arg == '-si':
i = i + 1
smoothing_iterations = int(argv[i])
elif arg == '-b':
i = i + 1
src_band = int(argv[i])
elif arg == '-md':
i = i + 1
max_distance = float(argv[i])
elif arg == '-nomask':
mask = 'none'
elif arg == '-mask':
i = i + 1
mask = argv[i]
elif arg[:2] == '-h':
return Usage()
elif src_filename is None:
src_filename = argv[i]
elif dst_filename is None:
dst_filename = argv[i]
else:
return Usage()
i = i + 1
if src_filename is None:
return Usage()
# =============================================================================
    # Verify we have next gen bindings with the FillNodata method.
# =============================================================================
try:
gdal.FillNodata
except AttributeError:
print('')
print('gdal.FillNodata() not available. You are likely using "old gen"')
print('bindings or an older version of the next gen bindings.')
print('')
return 1
# =============================================================================
# Open source file
# =============================================================================
if dst_filename is None:
src_ds = gdal.Open(src_filename, gdal.GA_Update)
else:
src_ds = gdal.Open(src_filename, gdal.GA_ReadOnly)
if src_ds is None:
print('Unable to open %s' % src_filename)
return 1
srcband = src_ds.GetRasterBand(src_band)
# =============================================================================
# Create output file if one is specified.
# =============================================================================
if dst_filename is not None:
drv = gdal.GetDriverByName(frmt)
dst_ds = drv.Create(dst_filename, src_ds.RasterXSize, src_ds.RasterYSize, 1,
srcband.DataType, creation_options)
wkt = src_ds.GetProjection()
if wkt != '':
dst_ds.SetProjection(wkt)
gt = src_ds.GetGeoTransform(can_return_null=True)
if gt:
dst_ds.SetGeoTransform(gt)
dstband = dst_ds.GetRasterBand(1)
ndv = srcband.GetNoDataValue()
if ndv is not None:
dstband.SetNoDataValue(ndv)
color_interp = srcband.GetColorInterpretation()
dstband.SetColorInterpretation(color_interp)
if color_interp == gdal.GCI_PaletteIndex:
color_table = srcband.GetColorTable()
dstband.SetColorTable(color_table)
CopyBand(srcband, dstband)
else:
dstband = srcband
# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
prog_func = None
else:
prog_func = gdal.TermProgress_nocb
if mask == 'default':
maskband = dstband.GetMaskBand()
elif mask == 'none':
maskband = None
else:
mask_ds = gdal.Open(mask)
maskband = mask_ds.GetRasterBand(1)
result = gdal.FillNodata(dstband, maskband,
max_distance, smoothing_iterations, options,
callback=prog_func)
src_ds = None
dst_ds = None
mask_ds = None
return result
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 30.586047 | 95 | 0.517336 |
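# Editorial example (not part of the script above): a minimal sketch of driving the same
# fill operation directly from Python. The filename is a placeholder; the 100 and 0 values
# mirror the script's max_distance and smoothing_iterations defaults.
from osgeo import gdal

ds = gdal.Open("dem_with_gaps.tif", gdal.GA_Update)   # hypothetical single-band raster
band = ds.GetRasterBand(1)
gdal.FillNodata(band, band.GetMaskBand(), 100, 0, [], callback=gdal.TermProgress_nocb)
ds = None  # dereference to flush and close the dataset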
9abded428ecc595262538c22fb751686427a9593 | 512 | py | Python | requests/__version__.py | AbishiekJay/MyRequest | dd9d8d2f401b29e91deea9e36875d8660e773170 | [
"Apache-2.0"
] | null | null | null | requests/__version__.py | AbishiekJay/MyRequest | dd9d8d2f401b29e91deea9e36875d8660e773170 | [
"Apache-2.0"
] | null | null | null | requests/__version__.py | AbishiekJay/MyRequest | dd9d8d2f401b29e91deea9e36875d8660e773170 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# .-. .-. .-. . . .-. .-. .-. .-.
# |( |- |.| | | |- `-. | `-.
# ' ' `-' `-`.`-' `-' `-' ' `-'
__title__ = 'requests'
__description__ = 'Python HTTP for Humans.'
__url__ = 'http://python-requests.org'
__version__ = '2.14.2'
__build__ = 0x021402
__author__ = 'Kenneth Reitz'
__author_email__ = '[email protected]'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 Kenneth Reitz'
__cake__ = u'✨ 🍰 ✨ Thanks for using my software. It means the world to me. --kennethreitz' | 32 | 90 | 0.580078 |
9a5c1f68a20ae77f851b7c77808363e8f8cfadda | 1,714 | py | Python | swap_in/clothes/models/clothes.py | Swap-in/swap.in_master | 22b052749bcaf779cde709fd8dde6129715d0314 | [
"MIT"
] | null | null | null | swap_in/clothes/models/clothes.py | Swap-in/swap.in_master | 22b052749bcaf779cde709fd8dde6129715d0314 | [
"MIT"
] | null | null | null | swap_in/clothes/models/clothes.py | Swap-in/swap.in_master | 22b052749bcaf779cde709fd8dde6129715d0314 | [
"MIT"
] | null | null | null | # Django
from django.db import models
# Models
from swap_in.users.models import User
from swap_in.utils.models import SwapinModel
from .categories import category
TYPE_GENDER = [
("FEMALE", "FEMALE"),
("MALE", "MALE"),
("UNISEX", "UNISEX")
]
class Clothes(SwapinModel):
"""Clothes Model."""
title = models.CharField(
max_length=150,
null=False
)
description = models.CharField(
max_length=500,
null=False
)
category_id = models.ForeignKey(
category,
related_name='category',
on_delete=models.CASCADE,
null=False
)
size = models.CharField(
max_length=20,
null=False
)
gender = models.CharField(
max_length=8,
choices=TYPE_GENDER
)
user_id = models.ForeignKey(
User,
related_name='clothes',
on_delete=models.CASCADE
)
brand = models.CharField(
max_length=100,
blank=True,
null=False
)
picture_1 = models.CharField(
max_length=500,
blank=False,
null=False
)
picture_2 = models.CharField(
max_length=500,
blank=True,
null=True
)
picture_3 = models.CharField(
max_length=500,
blank=True,
null=True
)
picture_4 = models.CharField(
max_length=500,
blank=True,
null=True
)
picture_5 = models.CharField(
max_length=500,
blank=True,
null=True
)
| 23.162162 | 44 | 0.507585 |
7aa78d118bc9bd0b2d8da4477f5af1366ad98597 | 2,107 | py | Python | project_name/settings.py | joshvillbrandt/django-quick-start-project | e7ddb6f80a2a36f12f3a5415aef49a3dcd703888 | [
"Apache-2.0"
] | 2 | 2017-09-30T14:45:40.000Z | 2018-03-10T13:25:42.000Z | project_name/settings.py | joshvillbrandt/django-quick-start-project | e7ddb6f80a2a36f12f3a5415aef49a3dcd703888 | [
"Apache-2.0"
] | null | null | null | project_name/settings.py | joshvillbrandt/django-quick-start-project | e7ddb6f80a2a36f12f3a5415aef49a3dcd703888 | [
"Apache-2.0"
] | 1 | 2022-02-13T09:40:56.000Z | 2022-02-13T09:40:56.000Z | """
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'{{ project_name }}',
#'app_name',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
| 24.788235 | 86 | 0.719032 |
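# Editorial example: the DATABASES block above defaults to SQLite. A PostgreSQL variant for
# the same Django generation is sketched below; every value is a placeholder to replace.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'mydatabase',
        'USER': 'mydatabaseuser',
        'PASSWORD': 'mypassword',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}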
447263db6850010a7828305b60c3bee72703dde1 | 3,784 | py | Python | tensor2tensor/data_generators/quora_qpairs.py | jaseweir/tensor2tensor | 2a33b152d7835af66a6d20afe7961751047e28dd | [
"Apache-2.0"
] | 12,921 | 2017-06-15T17:11:46.000Z | 2022-03-31T15:22:11.000Z | tensor2tensor/data_generators/quora_qpairs.py | jaseweir/tensor2tensor | 2a33b152d7835af66a6d20afe7961751047e28dd | [
"Apache-2.0"
] | 1,635 | 2017-06-18T15:29:27.000Z | 2022-03-19T20:35:23.000Z | tensor2tensor/data_generators/quora_qpairs.py | jaseweir/tensor2tensor | 2a33b152d7835af66a6d20afe7961751047e28dd | [
"Apache-2.0"
] | 3,521 | 2017-06-15T18:25:42.000Z | 2022-03-31T05:47:55.000Z | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the Quora Question Pairs dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import zipfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
EOS = text_encoder.EOS
@registry.register_problem
class QuoraQuestionPairs(text_problems.TextConcat2ClassProblem):
"""Quora duplicate question pairs binary classification problems."""
# Link to data from GLUE: https://gluebenchmark.com/tasks
_QQP_URL = ("https://firebasestorage.googleapis.com/v0/b/"
"mtl-sentence-representations.appspot.com/o/"
"data%2FQQP.zip?alt=media&token=700c6acf-160d-"
"4d89-81d1-de4191d02cb5")
@property
def is_generate_per_split(self):
return True
@property
def dataset_splits(self):
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 100,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
@property
def approx_vocab_size(self):
return 2**15
@property
def num_classes(self):
return 2
def class_labels(self, data_dir):
del data_dir
return ["not_duplicate", "duplicate"]
def _maybe_download_corpora(self, tmp_dir):
qqp_filename = "QQP.zip"
qqp_finalpath = os.path.join(tmp_dir, "QQP")
if not tf.gfile.Exists(qqp_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, qqp_filename, self._QQP_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return qqp_finalpath
def example_generator(self, filename):
skipped = 0
for idx, line in enumerate(tf.gfile.Open(filename, "rb")):
if idx == 0: continue # skip header
line = text_encoder.to_unicode_utf8(line.strip())
split_line = line.split("\t")
if len(split_line) < 6:
skipped += 1
tf.logging.info("Skipping %d" % skipped)
continue
s1, s2, l = split_line[3:]
# A neat data augmentation trick from Radford et al. (2018)
# https://blog.openai.com/language-unsupervised/
inputs = [[s1, s2], [s2, s1]]
for inp in inputs:
yield {
"inputs": inp,
"label": int(l)
}
def generate_samples(self, data_dir, tmp_dir, dataset_split):
qqp_dir = self._maybe_download_corpora(tmp_dir)
if dataset_split == problem.DatasetSplit.TRAIN:
filesplit = "train.tsv"
else:
filesplit = "dev.tsv"
filename = os.path.join(qqp_dir, filesplit)
for example in self.example_generator(filename):
yield example
@registry.register_problem
class QuoraQuestionPairsCharacters(QuoraQuestionPairs):
"""Quora duplicate question pairs classification problems, character level"""
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
def global_task_id(self):
return problem.TaskID.EN_SIM
| 30.516129 | 79 | 0.707452 |
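# Editorial sketch of the pair-swap augmentation used by example_generator() above:
# each (question1, question2, label) row yields two examples, one per ordering.
# The sample row is invented for illustration.
row = ["303951", "531060", "531061", "How do I learn Python?",
       "What is the best way to learn Python?", "1"]
s1, s2, label = row[3:]
examples = [{"inputs": [s1, s2], "label": int(label)},
            {"inputs": [s2, s1], "label": int(label)}]
assert len(examples) == 2 and examples[0]["label"] == 1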
032bf77c610e3d4eb6da5358889a46fd510bc4b8 | 276 | py | Python | pwnlib/filesystem/path.py | tkmikan/pwntools | 1238fc359eb72313d3f82849b2effdb7063ab429 | [
"MIT"
] | 8,966 | 2015-01-02T11:58:14.000Z | 2022-03-31T21:19:56.000Z | pwnlib/filesystem/path.py | tkmikan/pwntools | 1238fc359eb72313d3f82849b2effdb7063ab429 | [
"MIT"
] | 1,401 | 2015-01-01T00:56:22.000Z | 2022-03-31T16:19:53.000Z | pwnlib/filesystem/path.py | tkmikan/pwntools | 1238fc359eb72313d3f82849b2effdb7063ab429 | [
"MIT"
] | 1,844 | 2015-01-07T04:38:06.000Z | 2022-03-30T03:54:46.000Z | import six
import tempfile
if six.PY3:
from pathlib import *
else:
from pathlib2 import *
@classmethod
def mktemp(cls):
return cls(tempfile.mktemp())
@classmethod
def mkdtemp(cls):
return cls(tempfile.mkdtemp())
Path.mktemp = mktemp
Path.mkdtemp = mkdtemp
| 14.526316 | 34 | 0.717391 |
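# Editorial usage sketch for the helpers patched onto Path above; the import path is an
# assumption based on this file's location in the tree.
from pwnlib.filesystem.path import Path

tmp_name = Path.mktemp()   # temporary file name (tempfile.mktemp does not create the file)
tmp_dir = Path.mkdtemp()   # freshly created temporary directory
print(tmp_name, tmp_dir)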
a8615309cb4ec296ecee2675dc364629291dd52b | 506 | py | Python | com/blueberr/python/study/day1/interaction.py | CoderHongKong/python-study | 4e13359e3546b67d555a79adee63422cac7968c2 | [
"Apache-2.0"
] | null | null | null | com/blueberr/python/study/day1/interaction.py | CoderHongKong/python-study | 4e13359e3546b67d555a79adee63422cac7968c2 | [
"Apache-2.0"
] | null | null | null | com/blueberr/python/study/day1/interaction.py | CoderHongKong/python-study | 4e13359e3546b67d555a79adee63422cac7968c2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name:
# Purpose:
#
# Author: hekai
#-------------------------------------------------------------------------------
name = input("name:")
pwd = input("pwd:")
age = int(input("age:"))
print(type(age))
print(name, pwd)
info = '''
age:%d
''' %(age)
print(info)
info = '''
age:{age}
'''.format(age = age)
print(info)
info = '''
age:{0}
'''.format(age)
print(info)
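# Added illustration (not part of the original exercise): the same output with an f-string.
info = f'''
age:{age}
'''
print(info)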
| 16.322581 | 80 | 0.367589 |
c2ef9b9232407070c45955419539f04d3b2db128 | 2,285 | py | Python | src/deepproblog/examples/neurogta/train_on_more_data.py | vossenwout/gtadeepproblog | 65509b740518af422b96e84ef10716e0ac246e75 | [
"Apache-2.0"
] | null | null | null | src/deepproblog/examples/neurogta/train_on_more_data.py | vossenwout/gtadeepproblog | 65509b740518af422b96e84ef10716e0ac246e75 | [
"Apache-2.0"
] | null | null | null | src/deepproblog/examples/neurogta/train_on_more_data.py | vossenwout/gtadeepproblog | 65509b740518af422b96e84ef10716e0ac246e75 | [
"Apache-2.0"
] | null | null | null | import torch
from deepproblog.engines import ExactEngine
from deepproblog.examples.neurogta.data.dataset import train_dataset, test_dataset
from deepproblog.model import Model
from deepproblog.network import Network
from deepproblog.dataset import DataLoader
from deepproblog.evaluate import get_confusion_matrix
from deepproblog.train import train_model
from deepproblog.utils.standard_networks import smallnet, SmallNet, MLP
from deepproblog.utils.stop_condition import Threshold, StopOnPlateau
import numpy as np
import keyboard
import torchvision.transforms as transforms
import cv2
import time
import os
from Xlib import display, X
from PIL import Image
from deepproblog.query import Query
from problog.logic import Term, Constant
"""
THIS SCRIPT REQUIRES solver.solve() IN solver.py TO NOT CLEAR THE CACHE
REMEMBER TO UNDO THE COMMENT WHEN TRAINING A NEW MODEL
REMEMBER TO COMMENT WHEN RUNNING THIS SCRIPT
"""
key_dict = {0:"w", 4 : "w, a", 5 : "w, d"}
lr = 1e-4
gta_network1 = SmallNet(num_classes=3, N=10752)
gta_network2 = SmallNet(num_classes=3, N=768)
batch_size = 5
loader = DataLoader(train_dataset, batch_size)
gta_net1 = Network(gta_network1, "gta_net1", batching=True)
gta_net1.optimizer = torch.optim.Adam(gta_network1.parameters(), lr=lr)
gta_net2 = Network(gta_network2, "gta_net2", batching=True)
gta_net2.optimizer = torch.optim.Adam(gta_network2.parameters(), lr=lr)
model = Model("model5.pl", [gta_net1, gta_net2])
model.add_tensor_source("train", train_dataset)
model.add_tensor_source("test", test_dataset)
model.set_engine(ExactEngine(model), cache=True)
model.load_state("saved_models/gtamodel.pth")
train_obj = train_model(
model,
loader,
StopOnPlateau("Accuracy", warm_up=10, patience=10)
| Threshold("Accuracy", 1.0, duration=1),
log_iter=100 // batch_size,
test_iter=100 // batch_size,
test=lambda x: [("Accuracy", get_confusion_matrix(x, test_dataset).accuracy())],
infoloss=0.25,
)
model.save_state("saved_models/gtamodel.pth")
| 29.294872 | 84 | 0.791685 |
ad8a5eb792aae7ff19a44c7cfa95795f02fde078 | 789 | py | Python | built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/hubconf.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/hubconf.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/hubconf.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
dependencies = ['torch']
from timm.models import registry
globals().update(registry._model_entrypoints)
| 37.571429 | 78 | 0.721166 |
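# Editorial usage sketch: with a hubconf.py like the one above at a repository root, any
# timm-registered model can be loaded through torch.hub. The repository name below is a
# placeholder, not a value taken from this file.
import torch

model = torch.hub.load("some-user/some-repo", "gluon_resnet50_v1d", pretrained=False)
model.eval()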
86e4b9137d76900952dc519cec28e05827550bf8 | 2,490 | py | Python | tests/test_parse_context.py | gubschk/CDEWIP | fb628593417df5f955eb1fa62176b7cb3c322ebc | [
"MIT"
] | null | null | null | tests/test_parse_context.py | gubschk/CDEWIP | fb628593417df5f955eb1fa62176b7cb3c322ebc | [
"MIT"
] | null | null | null | tests/test_parse_context.py | gubschk/CDEWIP | fb628593417df5f955eb1fa62176b7cb3c322ebc | [
"MIT"
] | 1 | 2021-02-21T02:51:39.000Z | 2021-02-21T02:51:39.000Z | # -*- coding: utf-8 -*-
"""
test_parse_context
~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest
from lxml import etree
from chemdataextractor.doc.text import Sentence, Paragraph
from chemdataextractor.parse.context import context_phrase
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class TestParseApparatus(unittest.TestCase):
"""Simple apparatus parse tests."""
maxDiff = None
def do_parse(self, input, expected):
s = Sentence(input)
log.debug(s)
log.debug(s.tagged_tokens)
results = []
for i, r in enumerate(context_phrase.scan(s.tagged_tokens)):
log.debug(etree.tostring(r[0], pretty_print=True, encoding='unicode'))
results.append(etree.tostring(r[0], encoding='unicode'))
self.assertEqual(expected, results)
def test_apparatus(self):
""""""
s = 'The photoluminescence quantum yield (PLQY) was measured using a HORIBA Jobin Yvon FluoroMax-4 spectrofluorimeter'
expected = ['<context_phrase><measurement><quantum_yield>photoluminescence quantum yield PLQY</quantum_yield></measurement><apparatus>HORIBA Jobin Yvon FluoroMax-4 spectrofluorimeter</apparatus></context_phrase>']
self.do_parse(s, expected)
def test_apparatus2(self):
""""""
s = '1H NMR spectra were recorded on a Varian MR-400 MHz instrument.'
expected = ['<context_phrase><measurement><nmr>1H</nmr></measurement><apparatus>Varian MR-400 MHz instrument</apparatus></context_phrase>']
self.do_parse(s, expected)
def test_apparatus_record(self):
""""""
p = Paragraph('The photoluminescence quantum yield (PLQY) was measured using a HORIBA Jobin Yvon FluoroMax-4 spectrofluorimeter.')
expected = [{'quantum_yields': [{'apparatus': u'HORIBA Jobin Yvon FluoroMax-4 spectrofluorimeter'}]}]
self.assertEqual(expected, [r.serialize() for r in p.records])
def test_apparatus_record2(self):
""""""
p = Paragraph('NMR was run on a 400 MHz Varian NMR.')
expected = [{'nmr_spectra': [{'apparatus': '400 MHz Varian NMR'}]}]
self.assertEqual(expected, [r.serialize() for r in p.records])
if __name__ == '__main__':
unittest.main()
| 35.571429 | 222 | 0.669478 |
23dcaad02972a1fdf94b45ba9fb8b7a9ecd6fc36 | 3,309 | py | Python | sopel_modules/sports/mlb.py | RustyBower/sopel-sports | 302333371a0d24e653978e18eac1c6796dca1992 | [
"MIT"
] | 1 | 2021-10-01T15:38:53.000Z | 2021-10-01T15:38:53.000Z | sopel_modules/sports/mlb.py | RustyBower/sopel-sports | 302333371a0d24e653978e18eac1c6796dca1992 | [
"MIT"
] | 8 | 2020-02-23T00:30:46.000Z | 2021-09-26T16:03:09.000Z | sopel_modules/sports/mlb.py | RustyBower/sopel-sports | 302333371a0d24e653978e18eac1c6796dca1992 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2019, Rusty Bower, rustybower.com
import arrow
import requests
from sopel.formatting import bold
from sopel.module import commands, example
def parse_games(date):
if date:
r = requests.get(
"https://statsapi.mlb.com/api/v1/schedule?sportId=1&date={}".format(date)
)
else:
r = requests.get("https://statsapi.mlb.com/api/v1/schedule?sportId=1")
reply = []
for date in r.json()["dates"]:
# TODO - Figure out what events and matches are
for game in date["games"]:
# Game Is Not Started
if game["status"]["abstractGameState"] == "Preview":
reply.append(
"{} @ {} {} Eastern".format(
game["teams"]["away"]["team"]["name"],
game["teams"]["home"]["team"]["name"],
# TODO - Allow users to specify timezone to return
arrow.get(game["gameDate"]).to("US/Eastern").format("HH:mm"),
)
)
elif game["status"]["abstractGameState"] == "Final":
# Away Team Win
if int(game["teams"]["away"]["score"]) > int(
game["teams"]["home"]["score"]
):
reply.append(
"{} {} {} {} Final".format(
bold(game["teams"]["away"]["team"]["name"]),
bold(str(game["teams"]["away"]["score"])),
game["teams"]["home"]["team"]["name"],
str(game["teams"]["home"]["score"]),
)
)
# Home Team Win
elif int(game["teams"]["home"]["score"]) > int(
game["teams"]["away"]["score"]
):
reply.append(
"{} {} {} {} Final".format(
game["teams"]["away"]["team"]["name"],
str(game["teams"]["away"]["score"]),
bold(game["teams"]["home"]["team"]["name"]),
bold(str(game["teams"]["home"]["score"])),
)
)
# Tie Game
else:
reply.append(
"{} {} {} {} Final".format(
game["teams"]["away"]["team"]["name"],
game["teams"]["away"]["score"],
game["teams"]["home"]["team"]["name"],
game["teams"]["home"]["score"],
)
)
return reply
@commands("mlb")
@example(".mlb")
@example(".mlb 2019-10-29")
def mlb(bot, trigger):
date = trigger.group(2) or None
# Get Game Data
reply = " | ".join(parse_games(date))
# Split if greater than 200 characters so we don't accidentally cut anything off
if len(reply) > 200:
length = int(len(reply.split(" | ")) / 2)
bot.reply(" | ".join(reply.split(" | ")[0:length]))
bot.reply(" | ".join(reply.split(" | ")[length:]))
return
else:
if reply:
return bot.reply(reply)
else:
return
| 37.602273 | 85 | 0.414929 |
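# Editorial sketch of the schedule fields parse_games() above relies on, using a
# hand-built game record (team names and scores are invented).
sample_final = {
    "status": {"abstractGameState": "Final"},
    "teams": {
        "away": {"team": {"name": "Boston Red Sox"}, "score": 2},
        "home": {"team": {"name": "New York Yankees"}, "score": 5},
    },
}
# For this record the home-win branch appends "Boston Red Sox 2 New York Yankees 5 Final",
# wrapping the home name and score in sopel's bold().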
9a452cd5aefae61e8f8734abb9e18f3e71576a07 | 1,218 | py | Python | pyvisdk/do/host_cnx_failed_no_connection_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/host_cnx_failed_no_connection_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/host_cnx_failed_no_connection_event.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostCnxFailedNoConnectionEvent(vim, *args, **kwargs):
'''This event records a failure to connect to a host due to a host not being
present on the network.'''
obj = vim.client.factory.create('{urn:vim25}HostCnxFailedNoConnectionEvent')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % len(args))
required = [ 'chainId', 'createdTime', 'key', 'userName' ]
optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| 34.8 | 124 | 0.614943 |
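# Editorial usage sketch for the factory above: the first four positional values map onto
# the required 'chainId', 'createdTime', 'key' and 'userName' fields; 'vim' is assumed to
# be an already-connected pyvisdk service instance.
event = HostCnxFailedNoConnectionEvent(vim, 42, '2021-01-01T00:00:00Z', 7, 'root')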
6c63e1aa11dd355b3ea3fb2b6c70dd70ecef6fa9 | 88 | py | Python | tests/periodicities/Hour/Cycle_Hour_3200_H_30.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/periodicities/Hour/Cycle_Hour_3200_H_30.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/periodicities/Hour/Cycle_Hour_3200_H_30.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.periodicities.period_test as per
per.buildModel((30 , 'H' , 3200));
| 17.6 | 50 | 0.727273 |
aff9983d1697103bd24ad5c778ae7a2ff1cb41e5 | 9,603 | py | Python | zerver/tests/test_alert_words.py | sa2c/zulip | a00d911ed1071e6a8bbaa17d8df9e96115973588 | [
"Apache-2.0"
] | 1 | 2021-05-15T00:44:42.000Z | 2021-05-15T00:44:42.000Z | zerver/tests/test_alert_words.py | sa2c/zulip | a00d911ed1071e6a8bbaa17d8df9e96115973588 | [
"Apache-2.0"
] | 1 | 2019-12-24T06:51:52.000Z | 2019-12-24T06:51:52.000Z | zerver/tests/test_alert_words.py | sa2c/zulip | a00d911ed1071e6a8bbaa17d8df9e96115973588 | [
"Apache-2.0"
] | 1 | 2021-07-22T10:14:08.000Z | 2021-07-22T10:14:08.000Z | import orjson
from zerver.lib.actions import do_add_alert_words, do_remove_alert_words
from zerver.lib.alert_words import alert_words_in_realm, user_alert_words
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import most_recent_message, most_recent_usermessage
from zerver.models import AlertWord, UserProfile
class AlertWordTests(ZulipTestCase):
interesting_alert_word_list = ["alert", "multi-word word", "☃"]
def get_user(self) -> UserProfile:
# One nice thing about Hamlet is that he is
# already subscribed to Denmark.
user = self.example_user("hamlet")
# delete words from populate_db to simplify tests
AlertWord.objects.filter(user_profile=user).delete()
return user
def test_internal_endpoint(self) -> None:
user = self.get_user()
self.login_user(user)
params = {
"alert_words": orjson.dumps(["milk", "cookies"]).decode(),
}
result = self.client_post("/json/users/me/alert_words", params)
self.assert_json_success(result)
words = user_alert_words(user)
self.assertEqual(set(words), {"milk", "cookies"})
def test_default_no_words(self) -> None:
"""
Users start out with no alert words.
"""
user = self.get_user()
words = user_alert_words(user)
self.assertEqual(words, [])
def test_basics(self) -> None:
"""
Verifies the basic behavior of modifying alert words.
Also verifies the cache-flushing behavior.
"""
user = self.get_user()
realm_alert_words = alert_words_in_realm(user.realm)
self.assert_length(realm_alert_words.get(user.id, []), 0)
# Add several words, including multi-word and non-ascii words.
do_add_alert_words(user, self.interesting_alert_word_list)
words = user_alert_words(user)
self.assertEqual(set(words), set(self.interesting_alert_word_list))
realm_alert_words = alert_words_in_realm(user.realm)
self.assert_length(realm_alert_words[user.id], 3)
# Test the case-insensitivity of adding words
do_add_alert_words(user, {"ALert", "ALERT"})
words = user_alert_words(user)
self.assertEqual(set(words), set(self.interesting_alert_word_list))
realm_alert_words = alert_words_in_realm(user.realm)
self.assert_length(realm_alert_words[user.id], 3)
# Test the case-insensitivity of removing words
do_remove_alert_words(user, {"ALert"})
words = user_alert_words(user)
self.assertEqual(set(words), set(self.interesting_alert_word_list) - {"alert"})
realm_alert_words = alert_words_in_realm(user.realm)
self.assert_length(realm_alert_words[user.id], 2)
def test_remove_word(self) -> None:
"""
Removing alert words works via do_remove_alert_words, even
for multi-word and non-ascii words.
"""
user = self.get_user()
expected_remaining_alerts = set(self.interesting_alert_word_list)
do_add_alert_words(user, self.interesting_alert_word_list)
for alert_word in self.interesting_alert_word_list:
do_remove_alert_words(user, [alert_word])
expected_remaining_alerts.remove(alert_word)
actual_remaining_alerts = user_alert_words(user)
self.assertEqual(set(actual_remaining_alerts), expected_remaining_alerts)
def test_realm_words(self) -> None:
"""
We can gather alert words for an entire realm via
alert_words_in_realm. Alerts added for one user do not impact other
users.
"""
# Clear all the words that we got from populate_db.
AlertWord.objects.all().delete()
user1 = self.get_user()
do_add_alert_words(user1, self.interesting_alert_word_list)
user2 = self.example_user("othello")
do_add_alert_words(user2, ["another"])
realm_words = alert_words_in_realm(user2.realm)
self.assertEqual(len(realm_words), 2)
self.assertEqual(set(realm_words.keys()), {user1.id, user2.id})
self.assertEqual(set(realm_words[user1.id]), set(self.interesting_alert_word_list))
self.assertEqual(set(realm_words[user2.id]), {"another"})
def test_json_list_default(self) -> None:
user = self.get_user()
self.login_user(user)
result = self.client_get("/json/users/me/alert_words")
self.assert_json_success(result)
self.assertEqual(result.json()["alert_words"], [])
def test_json_list_nonempty(self) -> None:
user = self.get_user()
do_add_alert_words(user, ["one", "two", "three"])
self.login_user(user)
result = self.client_get("/json/users/me/alert_words")
self.assert_json_success(result)
self.assertEqual(set(result.json()["alert_words"]), {"one", "two", "three"})
def test_json_list_add(self) -> None:
user = self.get_user()
self.login_user(user)
result = self.client_post(
"/json/users/me/alert_words",
{"alert_words": orjson.dumps(["one ", "\n two", "three"]).decode()},
)
self.assert_json_success(result)
self.assertEqual(set(result.json()["alert_words"]), {"one", "two", "three"})
def test_json_list_remove(self) -> None:
user = self.get_user()
self.login_user(user)
result = self.client_post(
"/json/users/me/alert_words",
{"alert_words": orjson.dumps(["one", "two", "three"]).decode()},
)
self.assert_json_success(result)
self.assertEqual(set(result.json()["alert_words"]), {"one", "two", "three"})
result = self.client_delete(
"/json/users/me/alert_words", {"alert_words": orjson.dumps(["one"]).decode()}
)
self.assert_json_success(result)
self.assertEqual(set(result.json()["alert_words"]), {"two", "three"})
def message_does_alert(self, user: UserProfile, message: str) -> bool:
"""Send a bunch of messages as othello, so our user is notified"""
self.send_stream_message(self.example_user("othello"), "Denmark", message)
user_message = most_recent_usermessage(user)
return "has_alert_word" in user_message.flags_list()
def test_alert_flags(self) -> None:
user = self.get_user()
self.login_user(user)
result = self.client_post(
"/json/users/me/alert_words",
{"alert_words": orjson.dumps(["one", "two", "three"]).decode()},
)
self.assert_json_success(result)
self.assertEqual(set(result.json()["alert_words"]), {"one", "two", "three"})
# Alerts in the middle of messages work.
self.assertTrue(self.message_does_alert(user, "Normal alert one time"))
# Alerts at the end of messages work.
self.assertTrue(self.message_does_alert(user, "Normal alert one"))
# Alerts at the beginning of messages work.
self.assertTrue(self.message_does_alert(user, "two normal alerts"))
# Alerts with surrounding punctuation work.
self.assertTrue(self.message_does_alert(user, "This one? should alert"))
self.assertTrue(self.message_does_alert(user, "Definitely time for three."))
# Multiple alerts in a message work.
self.assertTrue(self.message_does_alert(user, "One two three o'clock"))
# Alerts are case-insensitive.
self.assertTrue(self.message_does_alert(user, "One o'clock"))
self.assertTrue(self.message_does_alert(user, "Case of ONE, won't stop me"))
# We don't cause alerts for matches in URLs.
self.assertFalse(self.message_does_alert(user, "Don't alert on http://t.co/one/ URLs"))
self.assertFalse(self.message_does_alert(user, "Don't alert on http://t.co/one URLs"))
# We don't cause alerts for matches within a word.
self.assertFalse(
self.message_does_alert(user, "Don't alert on clone, twofold or seventytwofold")
)
def test_update_alert_words(self) -> None:
user = self.get_user()
self.login_user(user)
result = self.client_post(
"/json/users/me/alert_words", {"alert_words": orjson.dumps(["ALERT"]).decode()}
)
content = "this is an ALERT for you"
self.send_stream_message(user, "Denmark", content)
self.assert_json_success(result)
original_message = most_recent_message(user)
user_message = most_recent_usermessage(user)
self.assertIn("has_alert_word", user_message.flags_list())
result = self.client_patch(
"/json/messages/" + str(original_message.id),
{
"message_id": original_message.id,
"content": "new ALERT for you",
},
)
self.assert_json_success(result)
user_message = most_recent_usermessage(user)
self.assertEqual(user_message.message.content, "new ALERT for you")
self.assertIn("has_alert_word", user_message.flags_list())
result = self.client_patch(
"/json/messages/" + str(original_message.id),
{
"message_id": original_message.id,
"content": "sorry false alarm",
},
)
self.assert_json_success(result)
user_message = most_recent_usermessage(user)
self.assertEqual(user_message.message.content, "sorry false alarm")
self.assertNotIn("has_alert_word", user_message.flags_list())
| 39.681818 | 95 | 0.649797 |
177718b33dc92ce41940f9dc56cc200f255f3ea9 | 7,043 | py | Python | discord/ui/input_text.py | zearakun/pycyat | 5f77f120df0caf6e4c4dd8b8f03c426354b0882f | [
"MIT"
] | null | null | null | discord/ui/input_text.py | zearakun/pycyat | 5f77f120df0caf6e4c4dd8b8f03c426354b0882f | [
"MIT"
] | null | null | null | discord/ui/input_text.py | zearakun/pycyat | 5f77f120df0caf6e4c4dd8b8f03c426354b0882f | [
"MIT"
] | null | null | null | from __future__ import annotations
import os
from typing import TYPE_CHECKING, Optional
from ..components import InputText as InputTextComponent
from ..enums import ComponentType, InputTextStyle
from ..utils import MISSING
__all__ = ("InputText",)
if TYPE_CHECKING:
from ..types.components import InputText as InputTextComponentPayload
class InputText:
"""Represents a UI text input field.
Parameters
----------
style: :class:`discord.InputTextStyle`
The style of the input text field.
custom_id: Optional[:class:`str`]
The ID of the input text field that gets received during an interaction.
label: :class:`str`
The label for the input text field.
Must be 45 characters or fewer.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
Must be 100 characters or fewer.
min_length: Optional[:class:`int`]
The minimum number of characters that must be entered.
Defaults to 0 and must be less than 4000.
max_length: Optional[:class:`int`]
The maximum number of characters that can be entered.
Must be between 1 and 4000.
required: Optional[:class:`bool`]
Whether the input text field is required or not. Defaults to `True`.
value: Optional[:class:`str`]
Pre-fills the input text field with this value.
Must be 4000 characters or fewer.
row: Optional[:class:`int`]
The relative row this input text field belongs to. A modal dialog can only have 5
rows. By default, items are arranged automatically into those 5 rows. If you'd
like to control the relative positioning of the row then passing an index is advised.
For example, row=1 will show up before row=2. Defaults to ``None``, which is automatic
ordering. The row number must be between 0 and 4 (i.e. zero indexed).
"""
def __init__(
self,
*,
style: InputTextStyle = InputTextStyle.short,
custom_id: str = MISSING,
label: str,
placeholder: Optional[str] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
required: Optional[bool] = True,
value: Optional[str] = None,
row: Optional[int] = None,
):
super().__init__()
custom_id = os.urandom(16).hex() if custom_id is MISSING else custom_id
if not (isinstance(custom_id, str) or custom_id is None):
raise TypeError(f"expected custom_id to be str, not {custom_id.__class__.__name__}")
self._underlying = InputTextComponent._raw_construct(
type=ComponentType.input_text,
style=style,
custom_id=custom_id,
label=label,
placeholder=placeholder,
min_length=min_length,
max_length=max_length,
required=required,
value=value,
)
self._input_value = False
self.row = row
self._rendered_row: Optional[int] = None
@property
def type(self) -> ComponentType:
return self._underlying.type
@property
def style(self) -> InputTextStyle:
""":class:`discord.InputTextStyle`: The style of the input text field."""
return self._underlying.style
@style.setter
def style(self, value: InputTextStyle):
if not isinstance(value, InputTextStyle):
raise TypeError(f"style must be of type InputTextStyle not {value.__class__}")
self._underlying.style = value
@property
def custom_id(self) -> str:
""":class:`str`: The ID of the input text field that gets received during an interaction."""
return self._underlying.custom_id
@custom_id.setter
def custom_id(self, value: str):
if not isinstance(value, str):
raise TypeError(f"custom_id must be None or str not {value.__class__}")
self._underlying.custom_id = value
@property
def label(self) -> str:
""":class:`str`: The label of the input text field."""
return self._underlying.label
@label.setter
def label(self, value: str):
if not isinstance(value, str):
raise TypeError(f"label should be str not {value.__class__}")
self._underlying.label = value
@property
def placeholder(self) -> Optional[str]:
"""Optional[:class:`str`]: The placeholder text that is shown before anything is entered, if any."""
return self._underlying.placeholder
@placeholder.setter
def placeholder(self, value: Optional[str]):
if value and not isinstance(value, str):
raise TypeError(f"placeholder must be None or str not {value.__class__}") # type: ignore
self._underlying.placeholder = value
@property
def min_length(self) -> Optional[int]:
"""Optional[:class:`int`]: The minimum number of characters that must be entered. Defaults to `0`."""
return self._underlying.min_length
@min_length.setter
def min_length(self, value: Optional[int]):
if value and not isinstance(value, int):
raise TypeError(f"min_length must be None or int not {value.__class__}") # type: ignore
self._underlying.min_length = value
@property
def max_length(self) -> Optional[int]:
"""Optional[:class:`int`]: The maximum number of characters that can be entered."""
return self._underlying.max_length
@max_length.setter
def max_length(self, value: Optional[int]):
if value and not isinstance(value, int):
raise TypeError(f"min_length must be None or int not {value.__class__}") # type: ignore
self._underlying.max_length = value
@property
def required(self) -> Optional[bool]:
"""Optional[:class:`bool`]: Whether the input text field is required or not. Defaults to `True`."""
return self._underlying.required
@required.setter
def required(self, value: Optional[bool]):
if not isinstance(value, bool):
raise TypeError(f"required must be bool not {value.__class__}") # type: ignore
self._underlying.required = bool(value)
@property
def value(self) -> Optional[str]:
"""Optional[:class:`str`]: The value entered in the text field."""
if self._input_value is not False:
# only False on init, otherwise the value was either set or cleared
return self._input_value # type: ignore
return self._underlying.value
@value.setter
def value(self, value: Optional[str]):
if value and not isinstance(value, str):
raise TypeError(f"value must be None or str not {value.__class__}") # type: ignore
self._underlying.value = value
@property
def width(self) -> int:
return 5
def to_component_dict(self) -> InputTextComponentPayload:
return self._underlying.to_dict()
def refresh_state(self, data) -> None:
self._input_value = data["value"]
| 37.663102 | 109 | 0.650859 |
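# Editorial usage sketch for the InputText item documented above, shown inside a modal
# dialog; discord.ui.Modal and the interaction callback wiring are assumed from the same
# library and may differ between forks.
import discord

class FeedbackModal(discord.ui.Modal):
    def __init__(self):
        super().__init__(title="Feedback")
        self.add_item(discord.ui.InputText(label="Your feedback",
                                           style=discord.InputTextStyle.long,
                                           max_length=300,
                                           required=True))

    async def callback(self, interaction: discord.Interaction):
        await interaction.response.send_message(f"Thanks: {self.children[0].value}")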
e395e0e044aab6f7b82bd9dba6afbfff2001bbd5 | 1,677 | py | Python | roll_prob.py | slaymaker1907/dnd-stuff | 7da4294fadcf52759a033d68f5b429d571870ff7 | [
"MIT"
] | null | null | null | roll_prob.py | slaymaker1907/dnd-stuff | 7da4294fadcf52759a033d68f5b429d571870ff7 | [
"MIT"
] | null | null | null | roll_prob.py | slaymaker1907/dnd-stuff | 7da4294fadcf52759a033d68f5b429d571870ff7 | [
"MIT"
] | null | null | null | import collections as c
# Dicts mapping rolls to probabilities.
def combine_rolls(roll1, roll2):
result = c.defaultdict(lambda: 0)
for value1, prob1 in roll1.items():
for value2, prob2 in roll2.items():
result[value1 + value2] += prob1 * prob2
return result
def generic_roll(dice_type, dice_count):
base_prob = 1 / dice_type
base_roll = {i:base_prob for i in range(1, dice_type + 1)}
result = base_roll
for i in range(dice_count - 1):
result = combine_rolls(result, base_roll)
return result
def advantage_roll(dice_type):
result = dict()
search_space = pow(dice_type, 2)
for i in range(1, dice_type + 1):
nums_lower = i - 1
        result[i] = (1 + 2 * nums_lower) / search_space  # the tie plus both orderings of a lower second die
return result
def stats_roll():
def get_possib(dice_count):
if dice_count == 1:
return [(i,) for i in range(1, 7)]
result = []
previous = get_possib(dice_count - 1)
for prev in previous:
for i in range(1, 7):
result.append((i,) + prev)
return result
rolls = get_possib(4)
rolls = [sum(roll) - min(roll) for roll in rolls]
counts = c.Counter(rolls)
result = dict()
for val_sum, count in counts.items():
result[val_sum] = count / len(rolls)
return result
def make_cumulative(distribution, max_val, min_val):
    cumulative = 0
    result = dict()
    # Walk from the highest value down to min_val (inclusive) so result[i] = P(X >= i).
    for i in range(max_val, min_val - 1, -1):
        cumulative += distribution[i]
        result[i] = cumulative
    return result
stats_cumulative = make_cumulative(stats_roll(), 18, 3)
print(pow(stats_cumulative[15], 3))
print(generic_roll(8, 20))
| 29.946429 | 62 | 0.627311 |
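# Editorial sanity checks for the helpers above: a combined roll is a proper probability
# distribution and 2d6 peaks at seven.
two_d6 = generic_roll(6, 2)
assert abs(sum(two_d6.values()) - 1.0) < 1e-9
assert abs(two_d6[7] - 6 / 36) < 1e-9
assert min(two_d6) == 2 and max(two_d6) == 12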
22915b91623e8b3eeb5b8c65d97961d3e45b148d | 14,570 | py | Python | morf-python-api/build/lib/morf/workflow/extract.py | jpgard/morf | f17afcacef68929a5ce9e7714208be1002a42418 | [
"MIT"
] | 14 | 2018-06-27T13:15:46.000Z | 2021-08-30T08:24:38.000Z | morf-python-api/build/lib/morf/workflow/extract.py | jpgard/morf | f17afcacef68929a5ce9e7714208be1002a42418 | [
"MIT"
] | 58 | 2018-02-03T15:31:15.000Z | 2019-10-15T02:12:05.000Z | morf-python-api/build/lib/morf/workflow/extract.py | jpgard/morf | f17afcacef68929a5ce9e7714208be1002a42418 | [
"MIT"
] | 7 | 2018-03-29T14:47:34.000Z | 2021-06-22T01:34:52.000Z | # Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Feature extraction functions for the MORF 2.0 API. For more information about the API, see the documentation.
"""
from multiprocessing import Pool
from morf.utils.alerts import send_email_alert
from morf.utils.api_utils import *
from morf.utils.config import MorfJobConfig
from morf.utils.job_runner_utils import run_image
from morf.utils.log import set_logger_handlers
# define module-level variables for config.properties
CONFIG_FILENAME = "config.properties"
module_logger = logging.getLogger(__name__)
def extract_all():
"""
Extract features using the docker image across all courses and all sessions except holdout.
:return:
"""
mode = "extract"
level = "all"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
# only call job_runner once with --mode-extract and --level=all; this will load ALL data up and run the docker image
run_image(job_config, job_config.raw_data_buckets, level=level)
result_file = collect_all_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_course(raw_data_dir="morf-data/", multithread = True):
"""
Extract features using the Docker image, building individual feature sets for each course.
:return:
"""
mode = "extract"
level = "course"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
# call job_runner once percourse with --mode=extract and --level=course
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, None, level, None])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
result_file = collect_course_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_session(labels=False, raw_data_dir="morf-data/", label_type="labels-train", multithread=True):
"""
Extract features using the Docker image, building individual feature sets for each "session" or iteration of the course.
:labels: flag for whether this is a job to generate output labels; if so, the collected result file is copied back into the raw data folder in s3 (as labels-train.csv).
:raw_data_dir: path to directory in all data buckets where course-level directories are located; this should be uniform for every raw data bucket.
:label_type: type of outcome label to use (string).
:multithread: whether to run job in parallel (multithread = false can be useful for debugging).
:return:
"""
level = "session"
mode = "extract"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# # clear any preexisting data for this user/job/mode and set number of cores
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
## for each bucket, call job_runner once per session with --mode=extract and --level=session
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
for session in fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course, fetch_holdout_session_only=False):
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, session, level])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
if not labels: # normal feature extraction job; collects features across all buckets and upload to proc_data_bucket
result_file = collect_session_results(job_config)
upload_key = "{}/{}/extract/{}".format(job_config.user_id, job_config.job_id, result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
else: # label extraction job; copy file into raw course data dir instead of proc_data_bucket, creating separate label files for each bucket
for raw_data_bucket in job_config.raw_data_buckets:
result_file = collect_session_results(job_config, raw_data_buckets=[raw_data_bucket])
upload_key = raw_data_dir + "{}.csv".format(label_type)
upload_file_to_s3(result_file, bucket=raw_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_holdout_all():
"""
Extract features using the Docker image across all courses and all sessions of holdout data.
:return:
"""
mode = "extract-holdout"
level = "all"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
# only call job_runner once with --mode-extract and --level=all; this will load ALL data up and run the docker image
run_image(job_config, job_config.raw_data_buckets, level=level)
result_file = collect_all_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_holdout_course(raw_data_dir="morf-data/", multithread = True):
"""
Extract features using the Docker image across each course of holdout data.
:return:
"""
mode = "extract-holdout"
level = "course"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
# call job_runner once percourse with --mode=extract and --level=course
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
holdout_session = fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
fetch_holdout_session_only=True)[0] # only use holdout run; unlisted
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, holdout_session, level, None])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
result_file = collect_course_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_holdout_session(labels=False, raw_data_dir="morf-data/", label_type="labels-train", multithread=True):
"""
Extract features using the Docker image across each session of holdout data.
:param labels: flag for whether this is a job to generate output labels; if so, the collected result file is copied back into the raw data folder in S3 (as {label_type}-test.csv) instead of being uploaded to proc_data_bucket.
:return: None
"""
mode = "extract-holdout"
level = "session"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# call job_runner once per session with --mode=extract-holdout and --level=session
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
holdout_session = fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
fetch_holdout_session_only=True)[0] # only use holdout run; unlisted
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, holdout_session, level])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
if not labels: # normal feature extraction job; collects features across all buckets and uploads them to proc_data_bucket
result_file = collect_session_results(job_config, holdout=True)
upload_key = "{}/{}/{}/{}".format(job_config.user_id, job_config.job_id, job_config.mode, result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
else: # label extraction job; copy file into raw course data dir instead of proc_data_bucket, creating separate label files for each bucket
for raw_data_bucket in job_config.raw_data_buckets:
result_file = collect_session_results(job_config, raw_data_buckets=[raw_data_bucket], holdout = True)
upload_key = raw_data_dir + "{}-test.csv".format(label_type)
upload_file_to_s3(result_file, bucket=raw_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
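# Note on the Pool fan-out used by the extractors above: each (course, session)
# pair is submitted via pool.apply_async(run_image, ...), and the subsequent
# res.get() calls both log each worker's return value and re-raise any
# exception raised inside a worker, so failed extractions surface in the job log.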
def fork_features(job_id_to_fork, raw_data_dir = "morf-data/"):
"""
Copies features from job_id_to_fork into current job_id.
:param job_id_to_fork: string, name of job_id (must be from same user).
:return: None.
"""
job_config = MorfJobConfig(CONFIG_FILENAME)
# TODO: multithread this
for mode in ["extract", "extract-holdout"]:
job_config.update_mode(mode)
clear_s3_subdirectory(job_config)
for raw_data_bucket in job_config.raw_data_buckets:
print("[INFO] forking features from bucket {} mode {}".format(raw_data_bucket, mode))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
for course in courses:
for session in fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
fetch_holdout_session_only = mode == "extract-holdout"):
# get current location of file, with old jobid name
prev_job_archive_filename = generate_archive_filename(job_config, course = course, session = session, mode = mode, job_id = job_id_to_fork)
# get location of prev archive file in s3
prev_job_key = make_s3_key_path(job_config, filename=prev_job_archive_filename, course=course, session=session, mode=mode, job_id=job_id_to_fork)
prev_job_s3_url = "s3://{}/{}".format(job_config.proc_data_bucket, prev_job_key)
# make new location of file, with new jobid name
current_job_archive_filename = generate_archive_filename(job_config, course=course, session=session,
mode=mode)
# copy from the previous job's location to the new current-job location
current_job_key = make_s3_key_path(job_config, filename=current_job_archive_filename, course=course,
session=session, mode=mode)
current_job_s3_url = "s3://{}/{}".format(job_config.proc_data_bucket, current_job_key)
copy_s3_file(job_config, sourceloc = prev_job_s3_url, destloc = current_job_s3_url)
# after copying individual extraction results, copy collected feature file
result_file = collect_session_results(job_config, holdout = mode == "extract-holdout")
upload_key = "{}/{}/{}/{}".format(job_config.user_id, job_config.job_id, job_config.mode, result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
return
| 50.590278 | 172 | 0.704118 |
c5389057ce8c483ea98a57cca8e33012d0dce7fd | 1,446 | py | Python | examples/searchbot_example.py | tysen2k/ffai | 2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec | [
"Apache-2.0"
] | null | null | null | examples/searchbot_example.py | tysen2k/ffai | 2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec | [
"Apache-2.0"
] | null | null | null | examples/searchbot_example.py | tysen2k/ffai | 2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import time
from ffai.core import Agent, Game
from ffai.core.load import *
from ffai.ai.registry import register_bot, make_bot
import examples.scripted_bot_example
# Load configurations, rules, arena and teams
config = load_config("bot-bowl-i.json")
ruleset = load_rule_set(config.ruleset, all_rules=False) # We don't need all the rules
arena = load_arena(config.arena)
home = load_team_by_filename("human", ruleset)
away = load_team_by_filename("human", ruleset)
config.competition_mode = False
# Play 5 games as away
for i in range(5):
away_agent = make_bot('searchbot')
away_agent.name = 'searchbot'
home_agent = make_bot('searchbot')
home_agent.name = 'searchbot'
config.debug_mode = False
game = Game(i, home, away, home_agent, away_agent, config, arena=arena, ruleset=ruleset)
game.config.fast_mode = True
print("Starting game", (i + 1))
start = time.time()
game.init()
end = time.time()
print(end - start)
# Play 5 games as home
for i in range(5):
away_agent = make_bot('searchbot')
away_agent.name = 'searchbot'
home_agent = make_bot('searchbot')
home_agent.name = 'searchbot'
config.debug_mode = False
game = Game(i, home, away, home_agent, away_agent, config, arena=arena, ruleset=ruleset)
game.config.fast_mode = True
print("Starting game", (i + 1))
start = time.time()
game.init()
end = time.time()
print(end - start)
| 29.510204 | 92 | 0.697787 |
942c3627acd262b51a189fafadf3a87b1248fa70 | 205 | py | Python | src/eavatar.ava/ava/__init__.py | eavatar/ava | 4f09c5417b7187dd919b7edabb8c516d8efc0696 | [
"BSD-3-Clause"
] | null | null | null | src/eavatar.ava/ava/__init__.py | eavatar/ava | 4f09c5417b7187dd919b7edabb8c516d8efc0696 | [
"BSD-3-Clause"
] | null | null | null | src/eavatar.ava/ava/__init__.py | eavatar/ava | 4f09c5417b7187dd919b7edabb8c516d8efc0696 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import time
VERSION_MAJOR = 0
VERSION_MINOR = 1
VERSION_MICRO = time.time()
__version__ = '%d.%d.%d' % (VERSION_MAJOR, VERSION_MINOR, VERSION_MICRO)
VERSION_STRING = __version__
| 18.636364 | 72 | 0.717073 |
9fccdf46a86e70b879eb9c475d56ba89c04aa4b6 | 904 | py | Python | src/euler_python_package/euler_python/medium/p188.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p188.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p188.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | #
#
import sys
from euler_python.utils import eulerlib
def problem188():
x, y, m = 1777, 1855, 10 ** 8
sys.setrecursionlimit(
y + 30
) # Because the default recursion limit of 1000 is insufficient
ans = tetration_mod(x, y, m)
return ans
def tetration_mod(x, y, m):
if y == 1:
return x % m
else:
# Fact: If x and m are coprime, then x^y mod m = x^(y mod totient(m)) mod m
return pow(x, tetration_mod(x, y - 1, totient(m)), m)
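# Sanity check (illustrative only, not part of the original solution): for a
# base coprime to the modulus the recursion matches direct evaluation, e.g.
# 3^(3^3) mod 10 = 3^27 mod 10 = 7, and tetration_mod(3, 3, 10) also returns 7.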
def totient(n):
assert n > 0
p = 1
i = 2
end = eulerlib.sqrt(n)
while i <= end:
if n % i == 0: # Found a factor
p *= i - 1
n //= i
while n % i == 0:
p *= i
n //= i
end = eulerlib.sqrt(n)
i += 1
if n != 1:
p *= n - 1
return p
if __name__ == "__main__":
print(problem188())
| 19.652174 | 83 | 0.484513 |
31d9b97154e9c376a0f7765397099fd09f1e86f9 | 4,139 | py | Python | bigml/api_handlers/timeserieshandler.py | mmerce/python | 696ddc2a10c985cfe266ec2807c24b98f0c9a317 | [
"Apache-2.0"
] | null | null | null | bigml/api_handlers/timeserieshandler.py | mmerce/python | 696ddc2a10c985cfe266ec2807c24b98f0c9a317 | [
"Apache-2.0"
] | 6 | 2016-10-27T18:26:12.000Z | 2017-10-03T22:54:20.000Z | bigml/api_handlers/timeserieshandler.py | mmerce/python | 696ddc2a10c985cfe266ec2807c24b98f0c9a317 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for time series' REST calls
https://bigml.com/api/timeseries
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_time_series_id
from bigml.constants import TIME_SERIES_PATH
class TimeSeriesHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the TimeSeriesHandler. This class is intended
to be used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.time_series_url = self.url + TIME_SERIES_PATH
def create_time_series(self, datasets,
args=None, wait_time=3, retries=10):
"""Creates a time series from a `dataset`
or a list of `datasets`.
"""
create_args = self._set_create_from_datasets_args(
datasets, args=args, wait_time=wait_time, retries=retries)
body = json.dumps(create_args)
return self._create(self.time_series_url, body)
def get_time_series(self, time_series, query_string='',
shared_username=None, shared_api_key=None):
"""Retrieves a time series.
The time_series parameter should be a string containing the
time series id or the dict returned by
create_time_series.
As a time series is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the time series
values and state info available at the time it is called.
If this is a shared time series, the username and
sharing api key must also be provided.
"""
check_resource_type(time_series, TIME_SERIES_PATH,
message="A time series id is needed.")
return self.get_resource(time_series,
query_string=query_string,
shared_username=shared_username,
shared_api_key=shared_api_key)
def time_series_is_ready(self, time_series, **kwargs):
"""Checks whether a time series's status is FINISHED.
"""
check_resource_type(time_series, TIME_SERIES_PATH,
message="A time series id is needed.")
resource = self.get_time_series(time_series, **kwargs)
return resource_is_ready(resource)
def list_time_series(self, query_string=''):
"""Lists all your time series.
"""
return self._list(self.time_series_url, query_string)
def update_time_series(self, time_series, changes):
"""Updates a time series.
"""
check_resource_type(time_series, TIME_SERIES_PATH,
message="A time series id is needed.")
return self.update_resource(time_series, changes)
def delete_time_series(self, time_series):
"""Deletes a time series.
"""
check_resource_type(time_series, TIME_SERIES_PATH,
message="A time series id is needed.")
return self.delete_resource(time_series)
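# Usage sketch (illustrative only; the dataset id and objective field are
# hypothetical, and `api` is assumed to be a connection object such as
# bigml.api.BigML() that mixes in this handler):
#
#   ts = api.create_time_series("dataset/5f0c9a2b1f386f5e02000000",
#                               args={"objective_fields": ["000001"]})
#   api.ok(ts)                    # wait until the resource reaches FINISHED
#   ts = api.get_time_series(ts)  # refresh with the finished resource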
| 36.307018 | 76 | 0.657405 |
bf5ce32e8a8339c4e84d6a73d1169490899ae6a8 | 17,659 | py | Python | tests/test_basic.py | pierretr/troposphere | 1bd6a010a3132aa3436ffe6b892f352876face4b | [
"BSD-2-Clause"
] | 4,573 | 2015-01-02T20:31:04.000Z | 2022-03-31T17:15:32.000Z | tests/test_basic.py | pierretr/troposphere | 1bd6a010a3132aa3436ffe6b892f352876face4b | [
"BSD-2-Clause"
] | 1,730 | 2015-01-02T19:24:47.000Z | 2022-03-31T23:22:52.000Z | tests/test_basic.py | pierretr/troposphere | 1bd6a010a3132aa3436ffe6b892f352876face4b | [
"BSD-2-Clause"
] | 1,753 | 2015-01-01T01:24:12.000Z | 2022-03-27T05:36:17.000Z | import pickle
import unittest
from troposphere import (
AWSObject,
AWSProperty,
Cidr,
If,
Join,
NoValue,
Output,
Parameter,
Ref,
Region,
Split,
Sub,
Template,
cloudformation,
depends_on_helper,
)
from troposphere.ec2 import Instance, NetworkInterface, Route, SecurityGroupRule
from troposphere.elasticloadbalancing import HealthCheck
from troposphere.s3 import Bucket, PublicRead
from troposphere.validators import positive_integer
class TestBasic(unittest.TestCase):
def test_badproperty(self):
with self.assertRaises(AttributeError):
Instance(
"ec2instance",
foobar=True,
)
def test_badrequired(self):
with self.assertRaises(ValueError):
t = Template()
t.add_resource(NetworkInterface("networkinterface"))
t.to_json()
def test_badtype(self):
with self.assertRaises(AttributeError):
Instance("ec2instance", image_id=0.11)
def test_goodrequired(self):
NetworkInterface("interface", SubnetId="abc123")
def test_extraattribute(self):
class ExtendedInstance(Instance):
def __init__(self, *args, **kwargs):
self.attribute = None
super().__init__(*args, **kwargs)
instance = ExtendedInstance("ec2instance", attribute="value")
self.assertEqual(instance.attribute, "value")
def test_depends_on_helper_with_resource(self):
resource_name = "Bucket1"
b1 = Bucket(resource_name)
self.assertEqual(depends_on_helper(b1), resource_name)
def test_depends_on_helper_with_string(self):
resource_name = "Bucket1"
self.assertEqual(depends_on_helper(resource_name), resource_name)
def test_resource_depends_on(self):
b1 = Bucket("B1")
b2 = Bucket("B2", DependsOn=b1)
self.assertEqual(b1.title, b2.resource["DependsOn"])
def test_resource_depends_on_attr(self):
b1 = Bucket("B1")
b2 = Bucket("B2", DependsOn=b1)
self.assertEqual(b1.title, b2.DependsOn)
def test_resource_depends_on_list(self):
b1 = Bucket("B1")
b2 = Bucket("B2")
b3 = Bucket("B3", DependsOn=[b1, b2])
self.assertEqual(b1.title, b3.DependsOn[0])
self.assertEqual(b2.title, b3.DependsOn[1])
def test_pickle_ok(self):
# tests that objects can be pickled/un-pickled without hitting issues
bucket_name = "test-bucket"
b = Bucket("B1", BucketName=bucket_name)
p = pickle.dumps(b)
b2 = pickle.loads(p)
self.assertEqual(b2.BucketName, b.BucketName)
def double(x):
return positive_integer(x) * 2
def call_correct(x):
return x
def call_incorrect(x):
raise ValueError
class FakeAWSObject(AWSObject):
type = "Fake::AWS::Object"
props = {
"callcorrect": (call_correct, False),
"callincorrect": (call_incorrect, False),
"singlelist": (list, False),
"multilist": ([bool, int, float], False),
"multituple": ((bool, int), False),
"helperfun": (positive_integer, False),
"listhelperfun": ([double], False),
}
def validate(self):
properties = self.properties
title = self.title
type = self.type
if "callcorrect" in properties and "singlelist" in properties:
raise ValueError(
(
"Cannot specify both 'callcorrect' and 'singlelist' in "
"object %s (type %s)" % (title, type)
)
)
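# Note on the fixture above: troposphere objects declare their schema through
# the ``props`` mapping of property name -> (validator_or_type, required), and
# a custom ``validate`` adds cross-property checks that are exercised when the
# template is serialized (see test_mutualexclusion below).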
class FakeAWSProperty(AWSProperty):
props = {}
class TestValidators(unittest.TestCase):
def test_callcorrect(self):
FakeAWSObject("fake", callcorrect=True)
def test_callincorrect(self):
with self.assertRaises(ValueError):
FakeAWSObject("fake", callincorrect=True)
def test_list(self):
FakeAWSObject("fake", singlelist=["a", 1])
def test_badlist(self):
with self.assertRaises(TypeError):
FakeAWSObject("fake", singlelist=True)
def test_multilist(self):
FakeAWSObject("fake", multilist=[1, True, 2, 0.3])
def test_badmultilist(self):
with self.assertRaises(TypeError):
FakeAWSObject("fake", multilist=True)
with self.assertRaises(TypeError):
FakeAWSObject("fake", multilist=[1, "a"])
def test_mutualexclusion(self):
t = Template()
t.add_resource(FakeAWSObject("fake", callcorrect=True, singlelist=[10]))
with self.assertRaises(ValueError):
t.to_json()
def test_tuples(self):
FakeAWSObject("fake", multituple=True)
FakeAWSObject("fake", multituple=10)
with self.assertRaises(TypeError):
FakeAWSObject("fake", multituple=0.1)
def test_helperfun(self):
FakeAWSObject("fake", helperfun=Ref("fake_ref"))
def test_listhelperfun(self):
with self.assertRaises(TypeError):
FakeAWSObject("fake", listhelperfun=1)
x = FakeAWSObject("fake", listhelperfun=[1, 2])
if x.listhelperfun != [2, 4]:
raise ValueError
with self.assertRaises(ValueError):
FakeAWSObject("fake", listhelperfun=[1, -2])
with self.assertRaises(ValueError):
FakeAWSObject("fake", listhelperfun=[1, "foo"])
def test_exception(self):
def ExceptionValidator(x):
raise ValueError
class ExceptionAWSProperty(AWSProperty):
props = {
"foo": (ExceptionValidator, True),
}
with self.assertRaises(ValueError):
ExceptionAWSProperty(foo="bar")
class TestHealthCheck(unittest.TestCase):
def test_healthy_interval_ok(self):
HealthCheck(
HealthyThreshold="2",
Interval="2",
Target="HTTP:80/index.html",
Timeout="4",
UnhealthyThreshold="9",
)
def test_healthy_interval_too_low(self):
with self.assertRaises(ValueError):
HealthCheck(
HealthyThreshold="1",
Interval="2",
Target="HTTP:80/index.html",
Timeout="4",
UnhealthyThreshold="9",
)
class TestOutput(unittest.TestCase):
def test_noproperty(self):
t = Output("MyOutput", Value="myvalue")
d = t.to_dict()
with self.assertRaises(KeyError):
d["Properties"]
def test_empty_awsproperty_outputs_empty_object(self):
t = FakeAWSProperty()
d = t.to_dict()
self.assertEqual(len(d), 0)
class TestParameter(unittest.TestCase):
def test_noproperty(self):
t = Parameter("MyParameter", Type="String")
d = t.to_dict()
with self.assertRaises(KeyError):
d["Properties"]
def test_property_validator(self):
p = Parameter("BasicString", Type="String", MaxLength=10)
p.validate()
p = Parameter("BasicString", Type="String", MaxValue=10)
with self.assertRaises(ValueError):
p.validate()
p = Parameter("BasicNumber", Type="Number", MaxValue=10)
p.validate()
p = Parameter("BasicNumber", Type="Number", AllowedPattern=".*")
with self.assertRaises(ValueError):
p.validate()
def test_invalid_parameter_property_in_template(self):
t = Template()
p = Parameter("BasicNumber", Type="Number", AllowedPattern=".*")
t.add_parameter(p)
with self.assertRaises(ValueError):
t.to_json()
def test_get_or_add_adds(self):
t = Template()
p = Parameter("param", Type="String", Default="foo")
result = t.get_or_add_parameter(p)
self.assertEqual(t.parameters["param"], p)
self.assertEqual(result, p)
def test_add_or_get_returns_with_out_adding_duplicate(self):
t = Template()
p = Parameter("param", Type="String", Default="foo")
t.add_parameter(p)
result = t.get_or_add_parameter(p)
self.assertEqual(t.parameters["param"], p)
self.assertEqual(result, p)
self.assertEqual(len(t.parameters), 1)
def test_property_default(self):
p = Parameter("param", Type="String", Default="foo")
p.validate()
p = Parameter("param", Type="Number", Default=1)
p.validate()
p = Parameter("param", Type="Number", Default=1.0)
p.validate()
p = Parameter("param", Type="Number", Default=0.1)
p.validate()
p = Parameter("param", Type="List<Number>", Default="1, 2, 3")
p.validate()
p = Parameter("param", Type="List<Number>", Default=" 0.1 , 2 , 1.1 ")
p.validate()
with self.assertRaises(ValueError):
p = Parameter("param", Type="String", Default=1)
p.validate()
with self.assertRaises(ValueError):
p = Parameter("param", Type="Number", Default="foo")
p.validate()
with self.assertRaises(TypeError):
p = Parameter("param", Type="Number", Default=["foo"])
p.validate()
with self.assertRaises(ValueError):
p = Parameter("param", Type="List<Number>", Default="foo")
p.validate()
with self.assertRaises(ValueError):
p = Parameter("param", Type="List<Number>", Default="1, 2, foo")
p.validate()
with self.assertRaises(TypeError):
p = Parameter("param", Type="List<Number>", Default=["1", "2"])
p.validate()
class TestProperty(unittest.TestCase):
def test_noproperty(self):
t = SecurityGroupRule(
IpProtocol="tcp",
FromPort="22",
ToPort="22",
CidrIp="0.0.0.0/0",
)
d = t.to_dict()
with self.assertRaises(KeyError):
d["Properties"]
def test_awsproperty_invalid_property(self):
t = FakeAWSProperty()
with self.assertRaises(AttributeError) as context:
t.badproperty = 5
self.assertTrue("FakeAWSProperty" in context.exception.args[0])
self.assertTrue("badproperty" in context.exception.args[0])
class TestDuplicate(unittest.TestCase):
def test_output(self):
t = Template()
o = Output("MyOutput", Value="myvalue")
t.add_output(o)
with self.assertRaises(ValueError):
t.add_output(o)
def test_parameter(self):
t = Template()
p = Parameter("MyParameter", Type="String")
t.add_parameter(p)
with self.assertRaises(ValueError):
t.add_parameter(p)
def test_resource(self):
t = Template()
r = FakeAWSObject("fake", callcorrect=True)
t.add_resource(r)
with self.assertRaises(ValueError):
t.add_resource(r)
class TestRef(unittest.TestCase):
def test_ref(self):
param = Parameter("param", Description="description", Type="String")
t = Ref(param)
ref = t.to_dict()
self.assertEqual(ref["Ref"], "param")
def test_ref_eq(self):
s = "AWS::NoValue"
r = Ref(s)
wch = cloudformation.WaitConditionHandle("TestResource")
self.assertEqual(s, r)
self.assertEqual(s, NoValue)
self.assertEqual(r, NoValue)
self.assertEqual(wch.Ref(), "TestResource")
self.assertNotEqual(r, "AWS::Region")
self.assertNotEqual(r, Region)
self.assertNotEqual(r, Ref)
self.assertNotEqual(wch.Ref(), "NonexistantResource")
def test_ref_hash(self):
s = hash("AWS::NoValue")
r = hash(Ref("AWS::NoValue"))
wch = cloudformation.WaitConditionHandle("TestResource")
self.assertEqual(s, r)
self.assertEqual(s, hash(NoValue))
self.assertEqual(r, hash(NoValue))
self.assertEqual(hash(wch.Ref()), hash("TestResource"))
self.assertNotEqual(r, hash("AWS::Region"))
self.assertNotEqual(r, hash(Region))
self.assertNotEqual(r, hash(Ref))
self.assertNotEqual(hash(wch.Ref()), hash("NonexistantResource"))
class TestName(unittest.TestCase):
def test_ref(self):
name = "fake"
t = Template()
resource = t.add_resource(Instance(name))
self.assertEqual(resource.name, name)
class TestCidr(unittest.TestCase):
def test_getcidr(self):
raw = Cidr("10.1.10.1/24", 2)
actual = raw.to_dict()
expected = {"Fn::Cidr": ["10.1.10.1/24", 2]}
self.assertEqual(expected, actual)
def test_getcidr_withsizemask(self):
raw = Cidr("10.1.10.1/24", 2, 10)
actual = raw.to_dict()
expected = {"Fn::Cidr": ["10.1.10.1/24", 2, 10]}
self.assertEqual(expected, actual)
class TestSub(unittest.TestCase):
def test_sub_without_vars(self):
s = "foo ${AWS::Region}"
raw = Sub(s)
actual = raw.to_dict()
expected = {"Fn::Sub": "foo ${AWS::Region}"}
self.assertEqual(expected, actual)
def test_sub_with_vars_unpakaged(self):
s = "foo ${AWS::Region} ${sub1} ${sub2}"
values = {"sub1": "uno", "sub2": "dos"}
raw = Sub(s, **values)
actual = raw.to_dict()
expected = {"Fn::Sub": ["foo ${AWS::Region} ${sub1} ${sub2}", values]}
self.assertEqual(expected, actual)
def test_sub_with_vars_not_unpakaged(self):
s = "foo ${AWS::Region} ${sub1} ${sub2}"
values = {"sub1": "uno", "sub2": "dos"}
raw = Sub(s, values)
actual = raw.to_dict()
expected = {"Fn::Sub": ["foo ${AWS::Region} ${sub1} ${sub2}", values]}
self.assertEqual(expected, actual)
def test_sub_with_vars_mix(self):
s = "foo ${AWS::Region} ${sub1} ${sub2} ${sub3}"
values = {"sub1": "uno", "sub2": "dos"}
raw = Sub(s, values, sub3="tres")
actual = raw.to_dict()
expected = {
"Fn::Sub": [
"foo ${AWS::Region} ${sub1} ${sub2} ${sub3}",
{"sub1": "uno", "sub2": "dos", "sub3": "tres"},
]
}
self.assertEqual(expected, actual)
class TestSplit(unittest.TestCase):
def test_split(self):
delimiter = ","
source_string = (
'{ "Fn::ImportValue": { "Fn::Sub": ' '"${VpcStack}-PublicSubnets" }'
)
raw = Split(delimiter, source_string)
actual = raw.to_dict()
expected = {
"Fn::Split": [
",",
'{ "Fn::ImportValue": { ' '"Fn::Sub": "${VpcStack}-PublicSubnets" }',
]
}
self.assertEqual(expected, actual)
with self.assertRaises(ValueError):
Split(10, "foobar")
class TestJoin(unittest.TestCase):
def test_join(self):
delimiter = ","
source_string = (
'{ [ "arn:aws:lambda:",{ "Ref": "AWS::Region" },":",'
'{ "Ref": "AWS::AccountId" },'
'":function:cfnRedisEndpointLookup" ] }'
)
raw = Join(delimiter, source_string)
actual = raw.to_dict()
expected = {
"Fn::Join": [
",",
'{ [ "arn:aws:lambda:",{ "Ref": '
'"AWS::Region" },":",{ "Ref": "AWS::AccountId" },'
'":function:cfnRedisEndpointLookup" ] }',
]
}
self.assertEqual(expected, actual)
with self.assertRaises(ValueError):
Join(10, "foobar")
class TestValidation(unittest.TestCase):
def test_validation(self):
route = Route(
"Route66",
DestinationCidrBlock="0.0.0.0/0",
RouteTableId=Ref("RouteTable66"),
InstanceId=If("UseNat", Ref("AWS::NoValue"), Ref("UseNat")),
NatGatewayId=If("UseNat", Ref("UseNat"), Ref("AWS::NoValue")),
)
t = Template()
t.add_resource(route)
with self.assertRaises(ValueError):
t.to_json()
def test_novalidation(self):
route = Route(
"Route66",
validation=False,
DestinationCidrBlock="0.0.0.0/0",
RouteTableId=Ref("RouteTable66"),
InstanceId=If("UseNat", Ref("AWS::NoValue"), Ref("UseNat")),
NatGatewayId=If("UseNat", Ref("UseNat"), Ref("AWS::NoValue")),
)
t = Template()
t.add_resource(route)
t.to_json()
def test_no_validation_method(self):
route = Route(
"Route66",
DestinationCidrBlock="0.0.0.0/0",
RouteTableId=Ref("RouteTable66"),
InstanceId=If("UseNat", Ref("AWS::NoValue"), Ref("UseNat")),
NatGatewayId=If("UseNat", Ref("UseNat"), Ref("AWS::NoValue")),
).no_validation()
t = Template()
t.add_resource(route)
t.to_json()
test_updatereplacepolicy_yaml = """\
Resources:
S3Bucket:
Properties:
AccessControl: PublicRead
Type: AWS::S3::Bucket
UpdateReplacePolicy: Retain
"""
class TestAttributes(unittest.TestCase):
def test_BogusAttribute(self):
t = Template()
with self.assertRaises(AttributeError):
t.add_resource(Bucket("S3Bucket", Bogus="Retain"))
def test_UpdateReplacePolicy(self):
t = Template()
t.add_resource(
Bucket(
"S3Bucket",
AccessControl=PublicRead,
UpdateReplacePolicy="Retain",
)
)
t.to_yaml()
self.assertEqual(t.to_yaml(), test_updatereplacepolicy_yaml)
if __name__ == "__main__":
unittest.main()
| 30.394148 | 85 | 0.579931 |
894376bd4fd9e4abb84a9c267679c5eadbcd2b9b | 1,335 | py | Python | tests/validation_data.py | DSAdv/tcp-tls-tunnel-py | e9b5271e4cfae1df09b9fab77db4906b7cee8337 | [
"MIT"
] | 1 | 2021-08-30T21:03:41.000Z | 2021-08-30T21:03:41.000Z | tests/validation_data.py | DSAdv/tcp-tls-tunnel-py | e9b5271e4cfae1df09b9fab77db4906b7cee8337 | [
"MIT"
] | 1 | 2022-03-31T12:02:29.000Z | 2022-03-31T12:02:29.000Z | tests/validation_data.py | DSAdv/tcp-tls-tunnel-py | e9b5271e4cfae1df09b9fab77db4906b7cee8337 | [
"MIT"
] | 1 | 2021-08-28T14:35:18.000Z | 2021-08-28T14:35:18.000Z |
HOWSMYSSL_VALIDATION_RESPONSE = {
'given_cipher_suites': [
'TLS_GREASE_IS_THE_WORD_AA',
'TLS_AES_128_GCM_SHA256',
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256',
'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',
'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',
'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384',
'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256',
'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256',
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA',
'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',
'TLS_RSA_WITH_AES_128_GCM_SHA256',
'TLS_RSA_WITH_AES_256_GCM_SHA384',
'TLS_RSA_WITH_AES_128_CBC_SHA',
'TLS_RSA_WITH_AES_256_CBC_SHA'
],
'ephemeral_keys_supported': True,
'session_ticket_supported': True,
'tls_compression_supported': False,
'unknown_cipher_suite_supported': False,
'beast_vuln': False,
'able_to_detect_n_minus_one_splitting': False,
'insecure_cipher_suites': {},
'tls_version': 'TLS 1.3',
'rating': 'Probably Okay',
}
| 43.064516 | 64 | 0.602247 |
48b861b4975598c12cc3b1ba2a63e9d06b3ae77d | 7,828 | py | Python | tests/network_coefficient_plots.py | RenYuanXue/residual-sample-nn | 0f05a88c9e9d99b3c1c73dc4d8a2d638a689ebfb | [
"MIT"
] | 2 | 2020-03-06T00:27:19.000Z | 2020-03-06T00:27:57.000Z | tests/network_coefficient_plots.py | RenYuanXue/residual-sample-nn | 0f05a88c9e9d99b3c1c73dc4d8a2d638a689ebfb | [
"MIT"
] | 3 | 2021-04-30T21:13:23.000Z | 2022-02-10T01:14:50.000Z | tests/network_coefficient_plots.py | RenYuanXue/residual-sample-nn | 0f05a88c9e9d99b3c1c73dc4d8a2d638a689ebfb | [
"MIT"
] | 2 | 2020-04-17T20:33:25.000Z | 2020-04-17T20:46:19.000Z | import sys
sys.path.append("../residual-sample-nn") # for finding the source files
import GenerateData as generate_data
import Network as Network
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import configparser
def RSNN_param_test(X_train, X_test, y_train, y_test, h_nodes, epochs, lr, times, threshold, coefficient, type):
"""
Trains a 3-layer residual-sample NN and returns the accuracy results.
:param X_train: Numpy array/pandas dataframe, training inputs for the model.
:param X_test: Numpy array/pandas dataframe, test inputs for the model.
:param y_train: Numpy array/pandas dataframe, training targets for the model.
:param y_test: Numpy array/pandas dataframe, test targets for the model.
:param h_nodes: Int, number of nodes in the hidden layer.
:param epochs: Int, number of epochs to train the NN.
:param lr: Float, learning rate for training.
:param times: Integer, number of times to MC sample.
:param threshold: Integer, used as threshold for forming residual to sample from.
:param coefficient: Float, used as the ratio for updating the variance.
:param type: string, select type of loss function.
:return: (training_acc, test_acc), training/testing accuracies
"""
in_dim = X_train.shape[1]
out_dim = y_train.shape[1]
#Initialize the network
np.random.seed(10) # Ensures each network is initialized the same way.
net = Network.Network([in_dim, h_nodes, out_dim], type = type, pdw =['gaussian'] * 2, pdb =['gaussian'] * 2)
net.Learn(X_train, y_train, epochs=epochs, lrate = lr, times = times, threshold = threshold, bootstrap = False, coefficient = coefficient)
acc_train = net.ClassificationAccuracy(X_train, y_train)
acc_test = net.ClassificationAccuracy(X_test, y_test)
return(acc_train, acc_test)
def network_coefficient_plots(X_train, X_test, y_train, y_test, rs_nn_params, coef_samples, fold=False):
"""
Trains a residual-sample NN over a range of variance-update coefficients on the given data set.
Plots comparing training/test accuracy against the coefficient are then created and saved to the working directory.
:param X_train: numpy array/pandas dataframe, input data to train
:param X_test: numpy array/pandas dataframe, input data to test
:param y_train: numpy array/pandas dataframe, target data to train
:param y_test: numpy array/pandas dataframe, target data to test
:param rs_nn_params: Dict, parameters used to create and train the residual-sample neural net
:param coef_samples: Dict, used to set the start, stop, and number of points for the coefficients attempted.
:param fold: Boolean, used to select whether folds are to be used (only iris).
:return:
"""
coef_trials = np.linspace(float(coef_samples['start']), float(coef_samples['stop']), num=int(coef_samples['points']))
trials = len(coef_trials)
train_acc = [0] * trials # For storing training accuracies.
test_acc = [0] * trials # For storing test accuracies.
if fold:
# Train a RS-NN
splits = len(X_train)
for i, num_sample in enumerate(coef_trials):
for j in range(0, splits):
(RSNN_train_acc, RSNN_test_acc) = RSNN_param_test(X_train[j], X_test[j], y_train[j], y_test[j],
int(rs_nn_params['h_nodes']), int(rs_nn_params['epochs']),
float(rs_nn_params['lr']), int(rs_nn_params['times']),
float(rs_nn_params['threshold']),
num_sample, type=rs_nn_params['type'])
train_acc[i] += np.array(RSNN_train_acc) / splits
test_acc[i] += np.array(RSNN_test_acc) / splits
else:
# Train a RS-NN
for i, num_sample in enumerate(coef_trials):
(RSNN_train_acc, RSNN_test_acc) = RSNN_param_test(X_train, X_test, y_train, y_test,
int(rs_nn_params['h_nodes']),
int(rs_nn_params['epochs']),
float(rs_nn_params['lr']),int(rs_nn_params['times']),
float(rs_nn_params['threshold']),
num_sample,
type=rs_nn_params['type'])
train_acc[i] = RSNN_train_acc
test_acc[i] = RSNN_test_acc
# Plot the accuracies
plt.figure(0)
plt.title("Training Accuracy vs. Coefficient")
plt.scatter(coef_trials, train_acc)
plt.xlabel('Coefficient')
plt.ylabel('Accuracy')
plt.savefig('train_acc_coefsamples.png')
plt.figure(1)
plt.title("Test Accuracy vs. Coefficient")
plt.scatter(coef_trials, test_acc)
plt.xlabel('Coefficient')
plt.ylabel('Accuracy')
plt.savefig('test_acc_coefsamples.png')
plt.show()
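# A minimal config_coefficient.ini sketch (section and key names are taken from
# main() below; the values themselves are only illustrative, and the `type`
# string must be whatever loss-function name Network.Network accepts):
#
#   [DATA]
#   dataset = iris
#   train_size = 100
#   test_size = 50
#   splits = 5
#
#   [RS NN PARAMS]
#   h_nodes = 20
#   epochs = 50
#   lr = 0.1
#   times = 10
#   threshold = 1
#   type = CE
#
#   [COEFFICIENT]
#   start = 0.1
#   stop = 1.0
#   points = 10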
def main():
config = configparser.ConfigParser()
config.read('config_coefficient.ini')
# select correct data
train_size = int(config['DATA']['train_size'])
test_size = int(config['DATA']['test_size'])
if config['DATA']['dataset'] == "simulated":
num_cov = int(config['DATA']['num_cov'])
mu = float(config['DATA']['mu'])
std = float(config['DATA']['std'])
range_cov = float(config['DATA']['range_cov'])
range_coef = float(config['DATA']['range_coef'])
range_bias = float(config['DATA']['range_bias'])
generator = generate_data.GenerateData(num_cov, mu, std,
range_cov, range_coef, range_bias, seed=100)  # Maybe add to config file.
X_train, y_train, _ = generator.generate(seed=15, sample_size=train_size)
X_test, y_test, _ = generator.generate(seed=16, sample_size=test_size)
network_coefficient_plots(X_train, X_test, y_train, y_test, config['RS NN PARAMS'], config['COEFFICIENT'])
if config['DATA']['dataset'] == "mnist":
import mnist_loader
train_full, validate_full, test_full = mnist_loader.load_data_wrapper() # we wont need validate dataset
X_train = np.array(train_full[0][:train_size])
y_train = np.array(train_full[1][:train_size])
X_test = np.array(test_full[0][:test_size])
y_test = np.array(test_full[1][:test_size])
network_coefficient_plots(X_train, X_test, y_train, y_test, config['RS NN PARAMS'], config['COEFFICIENT'])
if config['DATA']['dataset'] == "iris":
from sklearn import datasets
from sklearn.model_selection import KFold
fold = True
data = datasets.load_iris()
y_train = pd.get_dummies(data.target).values
X_train = data.data
splits = int(config['DATA']['splits']) # Number of splits selected.
kf = KFold(splits)
kf.get_n_splits(X_train)
kf.split(X_train)
Xtr = []
Xtest = []
ytr = []
ytest = []
for train_index, test_index in kf.split(X_train):
# train
# print("TRAIN:", train_index, "TEST:", test_index)
Xtr.append(X_train[train_index])
ytr.append(y_train[train_index])
# test
Xtest.append(X_train[test_index])
ytest.append(y_train[test_index])
network_coefficient_plots(Xtr, Xtest, ytr, ytest, config['RS NN PARAMS'], config['COEFFICIENT'], fold=fold)
# run the coefficient sweep when executed as a script
if __name__ == "__main__":
main()
| 47.731707 | 142 | 0.627619 |
68a5a791e8feb36b742f033238643818f7ab4d7c | 1,251 | py | Python | setup.py | patrickaudriaz/mini-project | 49d8200b8210fe1b09a1a05f2357260a77e10c36 | [
"MIT"
] | 4 | 2020-10-07T11:16:29.000Z | 2021-09-19T05:49:32.000Z | setup.py | patrickaudriaz/mini-project | 49d8200b8210fe1b09a1a05f2357260a77e10c36 | [
"MIT"
] | 12 | 2020-09-09T05:59:43.000Z | 2020-10-07T13:23:18.000Z | setup.py | patrickaudriaz/mini-project | 49d8200b8210fe1b09a1a05f2357260a77e10c36 | [
"MIT"
] | 1 | 2020-10-06T03:52:05.000Z | 2020-10-06T03:52:05.000Z | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
def load_requirements(f):
retval = [str(k.strip()) for k in open(f, "rt")]
return [k for k in retval if k and k[0] not in ("#", "-")]
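# e.g. blank lines and lines starting with "#" (comments) or "-" (pip options
# such as "-r base.txt") are dropped from the returned requirement list.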
setup(
name="rrgp",
version="1.0.7",
description="This project aims to train a model to recognise human activities (like walking, standing, or sitting) based on accelerometer and gyroscope data collected with a smartphone.",
url="https://github.com/patrickaudriaz/mini-project",
license="MIT",
author="Geoffrey Raposo, Patrick Audriaz",
author_email="[email protected], [email protected]",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
packages=find_packages(),
include_package_data=True,
install_requires=load_requirements("requirements.txt"),
entry_points={"console_scripts": ["rrgp = rrgp.run:main"]},
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 35.742857 | 191 | 0.673062 |
99d05b4f5d9395b3236b58b8060a91c56948b8f9 | 9,983 | py | Python | analysis/cloud9/coverage.py | wangpeipei90/cloud9 | a15558d1fa3e397b157b1191aa1ef9b225eef2dd | [
"BSD-3-Clause"
] | 40 | 2015-01-08T10:28:20.000Z | 2022-03-11T02:48:46.000Z | analysis/cloud9/coverage.py | mcanthony/cloud9 | a15558d1fa3e397b157b1191aa1ef9b225eef2dd | [
"BSD-3-Clause"
] | 3 | 2015-04-13T04:13:39.000Z | 2020-03-28T05:23:41.000Z | analysis/cloud9/coverage.py | mcanthony/cloud9 | a15558d1fa3e397b157b1191aa1ef9b225eef2dd | [
"BSD-3-Clause"
] | 24 | 2015-03-30T04:39:28.000Z | 2022-01-04T17:17:33.000Z | #!/usr/bin/python
#
# Copyright (c) 2012 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Dependable Systems Laboratory, EPFL nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE DEPENDABLE SYSTEMS LABORATORY, EPFL BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# All contributors are listed in CLOUD9-AUTHORS file.
#
"""Coverage log manipulation."""
__author__ = "[email protected] (Stefan Bucur)"
import fnmatch
import logging
import os
import shutil
import struct
import sys
import jinja2
import cloud9.CoverageLogs_pb2 as CoverageLogs
################################################################################
# Annotation data - Final product passed to the template rendering engine
################################################################################
class AnnotatedSourceFile(object):
def __init__(self, source_stats, lines):
self.source_stats = source_stats
self.lines = lines
class SourceFileStatistics(object):
def __init__(self, file_name, coverage, covered_line_count, coverable_line_count):
self.file_name = file_name
self.coverage = coverage
self.covered_line_count = covered_line_count
self.coverable_line_count = coverable_line_count
class AnnotatedSourceLine(object):
def __init__(self, number, contents, coverable, covered):
self.number = number
self.contents = contents
self.coverable = coverable
self.covered = covered
################################################################################
# Protobuf Parsing
################################################################################
def _ReadCoverageInfoEntry(data_file):
"""Reads a packet of data from the specified file."""
UINT32_SIZE = 4
pkt_size_buf = data_file.read(UINT32_SIZE)
if len(pkt_size_buf) != UINT32_SIZE:
raise ValueError("Invalid packet size read.")
pkt_size = struct.unpack("I", pkt_size_buf)[0]
pkt = data_file.read(pkt_size)
if len(pkt) != pkt_size:
raise ValueError("Incomplete packet.")
return pkt
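# Framing note (inferred from the reader above): the coverage log is a sequence
# of entries, each a 4-byte size prefix (struct format "I") followed by a
# serialized CoverageLogs.CoverageInfo message; the first entry lists the
# coverable lines and each later entry is a snapshot of the covered lines.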
def _ReadCoverageInfoMessage(message):
"""Parses a coverage protobuf message."""
cov_info = {}
for file_cov_msg in message.file_coverage:
file_path = os.path.abspath(file_cov_msg.file_name)
cov_info[file_path] = set(file_cov_msg.covered_lines)
return cov_info
def ReadLastCoverageInfo(data_file):
"""Reads the last set of coverage values from the specified file."""
try:
header_pkt = _ReadCoverageInfoEntry(data_file)
except ValueError as e:
logging.error("Cannot read header packet: %s" % e)
sys.exit(1)
coverable_lines_msg = CoverageLogs.CoverageInfo()
coverable_lines_msg.ParseFromString(header_pkt)
last_coverage_pkt = None
while True:
try:
last_coverage_pkt = _ReadCoverageInfoEntry(data_file)
except ValueError:
break
if not last_coverage_pkt:
logging.error("Cannot read coverage packet.")
sys.exit(1)
covered_lines_msg = CoverageLogs.CoverageInfo()
covered_lines_msg.ParseFromString(last_coverage_pkt)
coverable_lines = _ReadCoverageInfoMessage(coverable_lines_msg)
covered_lines = _ReadCoverageInfoMessage(covered_lines_msg)
return coverable_lines, covered_lines
################################################################################
class SourceFilter(object):
"""Filter object based on a .coverable file."""
def __init__(self, file_list):
self.file_list = file_list
def IsValid(self, file_name):
"""Returns True if file_name passes the filter."""
for suffix in self.file_list:
if file_name.endswith(suffix):
return True
return False
def LoadSourceFilter(coverable_file_name):
"""Loads a SourceFilter from a specified file."""
with open(coverable_file_name, "r") as cov_file:
file_list = [line.strip() for line in cov_file.readlines()]
return SourceFilter(file_list)
################################################################################
class CoverageAnnotator(object):
"""Produces coverage annotation."""
def __init__(self, coverage_data=None, ground_coverage_data=None, source_filter=None):
self.source_filter = source_filter
self.coverage_data = coverage_data
self.ground_coverage_data = ground_coverage_data
# Statistics
self._total_coverable_count = 0
self._total_covered_count = 0
def AnnotateSourceFile(self, file_name):
"""Annotates source file based on available coverage information."""
coverable_lines = set()
covered_lines = set()
# Search for the available coverage information
for candidate_name in self.ground_coverage_data.iterkeys():
if candidate_name.endswith(file_name):
coverable_lines = self.ground_coverage_data[candidate_name]
break
for candidate_name in self.coverage_data.iterkeys():
if candidate_name.endswith(file_name):
covered_lines = self.coverage_data[candidate_name]
# Generate statistics
self._total_coverable_count += len(coverable_lines)
self._total_covered_count += len(covered_lines)
source_stats = SourceFileStatistics(
file_name, 100.0*len(covered_lines)/len(coverable_lines)
if coverable_lines else 0.0,
len(covered_lines),
len(coverable_lines))
# Generate per-line annotation
annotated_file = AnnotatedSourceFile(source_stats, [])
with open(file_name, "r") as source_file:
line_counter = 1
for source_line in source_file:
annotated_line = AnnotatedSourceLine(
line_counter, source_line.rstrip(),
line_counter in coverable_lines,
line_counter in covered_lines)
annotated_file.lines.append(annotated_line)
line_counter += 1
return annotated_file
def AnnotateDirectory(self, root_path):
"""Annotate all source files under the given directory."""
annotation_data = []
for dirpath, _, filenames in os.walk(root_path):
for filename in filenames:
if not self._DefaultFileNameFilter(filename):
continue
file_path = os.path.abspath(os.path.join(dirpath, filename))
logging.info("Processing '%s'" % file_path)
if self.source_filter and not self.source_filter.IsValid(file_path):
logging.info(" *SKIPPING*")
continue
annotated_file = self.AnnotateSourceFile(file_path)
annotation_data.append(annotated_file)
return annotation_data
@staticmethod
def _DefaultFileNameFilter(file_name):
return (fnmatch.fnmatch(file_name, '*.cc')
or fnmatch.fnmatch(file_name, '*.h'))
################################################################################
class AnnotationRenderer(object):
def __init__(self):
self.base_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# Construct the HTML template
self.jinja_env = jinja2.Environment(
loader=jinja2.PackageLoader("cloud9", "views"))
self.report_template = self.jinja_env.get_template("report.html")
self.source_template = self.jinja_env.get_template("source-list.html")
@staticmethod
def _CreateUniqueDirectory(output_path, write_sym_link=True):
sym_link = None
unique_path = output_path
if os.path.exists(unique_path):
counter = 0
while os.path.exists(unique_path):
counter += 1
unique_path = "%s-%d" % (output_path, counter)
sym_link = "%s-last" % output_path
os.mkdir(unique_path)
if sym_link and write_sym_link:
if os.path.islink(sym_link):
os.unlink(sym_link)
if not os.path.exists(sym_link):
os.symlink(unique_path, sym_link)
return unique_path
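# Example of the naming scheme above (paths illustrative): if "report" already
# exists, the output goes to "report-1" (then "report-2", ...) and the
# "report-last" symlink is repointed at the newest directory.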
def RenderAnnotation(self, annotation_data, output_path):
"""Renders an HTML report of the specified coverage annotation.
Note: this changes the current working directory to the report output directory.
"""
output_path = self._CreateUniqueDirectory(output_path)
os.chdir(output_path)
with open("report.html", "w") as report_file:
report_file.write(
self.report_template.render(annotation_data=annotation_data))
os.mkdir("src")
for annotated_file in annotation_data:
with open("src/%s.html" % annotated_file.source_stats.file_name.replace("/", "_"), "w") as source_file:
source_file.write(self.source_template.render(source_stats=annotated_file.source_stats, lines=annotated_file.lines))
for static in ["cloud9/css/source.css", "cloud9/css/report.css"]:
shutil.copy(os.path.join(self.base_dir, static), ".")
| 32.624183 | 124 | 0.67625 |
a8895b2313c148747c0fe50b6ef84ede31f3fcb3 | 4,945 | py | Python | scripts/release-diff.py | EBI-Metabolights/galaxy | 4b3c94822a2e859c83bef221fe99ddb87d4d9cae | [
"CC-BY-3.0"
] | null | null | null | scripts/release-diff.py | EBI-Metabolights/galaxy | 4b3c94822a2e859c83bef221fe99ddb87d4d9cae | [
"CC-BY-3.0"
] | null | null | null | scripts/release-diff.py | EBI-Metabolights/galaxy | 4b3c94822a2e859c83bef221fe99ddb87d4d9cae | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import glob
import subprocess
from pathlib import Path
import yaml
def flatten(d, path):
"""
Flatten a dictionary into ('some/path/to/key', value)
>>> flatten({'a': {'b': 2}, 'q': 3}, [])
[('a.b', 2), ('q', 3)]
"""
if isinstance(d, dict):
for k, v in d.items():
yield from flatten(v, path + [k])
else:
yield (".".join(path), d)
def flat_dict(d):
return dict(flatten(d, []))
# Load without the includes since we can't follow those across git revisions.
class MockOrderedLoader(yaml.SafeLoader):
def include(self, node):
return {}
MockOrderedLoader.add_constructor("!include", MockOrderedLoader.include)
def diff_files(old, new):
# Flatten them
old_kv = flat_dict(old)
new_kv = flat_dict(new)
# Compare them
old_k = set(old_kv.keys())
new_k = set(new_kv.keys())
added = []
for item in new_k - old_k:
parent = ".".join(item.split(".")[0:-1])
if parent in new_k and parent not in old_k:
added.append(item)
else:
added.append(parent)
added = set(added)
removed = []
for item in old_k - new_k:
parent = ".".join(item.split(".")[0:-1])
if parent in old_k and parent not in new_k:
removed.append(item)
else:
removed.append(parent)
removed = set(removed)
shared = old_k & new_k
changed = [(k, old_kv[k], new_kv[k]) for k in shared if old_kv[k] != new_kv[k]]
return added, removed, changed
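# Small illustration of the return value (derived from the logic above):
#
#   diff_files({"a": 1}, {"a": 1, "b": {"c": 2}})
#   -> ({"b"}, set(), [])
#
# i.e. a newly added nested option is reported by its parent key, removals
# mirror that, and each changed entry is a (flat.key, old_value, new_value) tuple.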
def _report_dict(title, subheading, data, mapper):
print(title)
print("-" * len(title))
print()
print(subheading)
print()
for fn in data:
print(fn)
print("~" * len(fn))
print()
for k in data[fn]:
print(mapper(k))
print()
print()
def _indent(s, by=4):
whitespace = " " * by
s = s if isinstance(s, list) else s.splitlines()
return "\n".join(f"{whitespace}{line}" for line in s)
def report_diff(added, changed, removed, new_files):
# Print out report
if added or changed or removed:
print("Configuration Changes")
print("=====================")
print()
if added:
_report_dict("Added", "The following configuration options are new", added, lambda x: f"- {x}")
if changed:
_report_dict(
"Changed",
"The following configuration options have been changed",
changed,
lambda x: f"- {x[0]} has changed from\n\n ::\n\n{_indent(x[1])}\n\n to\n\n ::\n\n{_indent(x[2])}\n\n",
)
if removed:
_report_dict(
"Removed", "The following configuration options have been completely removed", removed, lambda x: f"- {x}"
)
if new_files:
print("New Configuration Files")
print("-----------------------")
print()
print("The following files are new, or recently converted to yaml")
print()
for k in new_files:
print(f"- ``{k}``")
def load_at_time(path, revision=None):
if revision is not None:
return subprocess.check_output(["git", "show", f"{revision}:{path}"], stderr=subprocess.STDOUT)
else:
with open(path) as handle:
return handle.read()
def main(old_revision, new_revision=None):
globs = (
"config/*.yml.sample",
"lib/galaxy/config/schemas/*schema.yml",
)
files_to_diff = [f for g in globs for f in glob.glob(g)]
added = {}
removed = {}
changed = {}
new_files = []
for file in files_to_diff:
filename = file
if "config_schema.yml" in file:
filename = "config/galaxy.yml.sample:galaxy"
elif "uwsgi_schema.yml" in file:
filename = "config/galaxy.yml.sample:uwsgi"
real_path = Path(file).resolve().relative_to(Path.cwd())
try:
old_contents = yaml.load(load_at_time(real_path, old_revision), Loader=MockOrderedLoader)
new_contents = yaml.load(load_at_time(real_path, new_revision), Loader=MockOrderedLoader)
(a, r, c) = diff_files(old_contents, new_contents)
if a:
added[filename] = sorted(a)
if r:
removed[filename] = sorted(r)
if c:
changed[filename] = sorted(c)
except subprocess.CalledProcessError:
new_files.append(file)
report_diff(added, changed, removed, new_files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Diff yaml configuration files between two points in time.")
parser.add_argument("old_revision", help="Old revision")
parser.add_argument(
"--new_revision",
help="New revision (defaults to whatever is currently in tree)",
)
args = parser.parse_args()
main(args.old_revision, args.new_revision)
| 27.17033 | 121 | 0.581193 |