id | text | dataset_id
---|---|---
168740 | from JumpScale import j
def cb():
from .CodeTools import CodeTools
return CodeTools()
j._register('codetools', cb)
| StarcoderdataPython |
27307 | "Common functions that may be used everywhere"
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from distutils.util import strtobool
try:
input = raw_input
except NameError:
pass
def yes_no_query(question):
"""Ask the user *question* for 'yes' or 'no'; ask again until user
inputs a valid option.
Returns:
'True' if user answered 'y', 'yes', 't', 'true', 'on' or '1'.
'False' if user answered 'n', 'no', 'f', 'false', 'off' or '0'.
"""
print("{} (y/n)".format(question), end=" "),
while True:
try:
return strtobool(input().lower())
except ValueError:
print("Please respond with 'y' or 'n'.")
def ask_overwrite(dest):
    """If the file *dest* already exists, ask the user whether to overwrite it.
    If confirmed, the file is removed so it can be written again later;
    otherwise the operation is cancelled and the program exits.
    """
msg = "File '{}' already exists. Overwrite file?".format(dest)
if os.path.exists(dest):
if yes_no_query(msg):
os.remove(dest)
else:
sys.exit("Cancelling operation...")
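# Illustrative usage of the helpers above (interactive session assumed):
#   ask_overwrite("results.txt")           # prompts only when the file already exists
#   if yes_no_query("Run the analysis?"):  # re-asks until a valid yes/no answer is given
#       pass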
| StarcoderdataPython |
99458 | import json
from django.test import TestCase, LiveServerTestCase
import django.test.client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
import oauthlib.oauth1.rfc5849
from restless_oauth.models import *
import requests
from oauth_hook import OAuthHook
import urlparse
class OAuthTestClient(django.test.client.Client):
def __init__(self, *args, **kwargs):
super(OAuthTestClient, self).__init__(*args, **kwargs)
self.client_key = None
self.client_secret = None
self.access_token = None
self.access_secret = None
self.oauth_verifier = None
self.oauth_uri = None
self.oauth_client = None
def _init_oauth_client(self):
if self.client_key:
self.oauth_client = oauthlib.oauth1.rfc5849.Client(self.client_key,
client_secret=self.client_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_secret,
verifier=self.oauth_verifier)
else:
self.oauth_client = None
def set_uri(self, uri):
self.oauth_uri = unicode(uri)
def set_client_key(self, client_key, client_secret=None):
self.client_key = unicode(client_key)
self.client_secret = unicode(client_secret)
self._init_oauth_client()
def set_access_token(self, access_token, access_secret=None):
self.access_token = unicode(access_token)
self.access_secret = unicode(access_secret)
self._init_oauth_client()
def set_verifier(self, verifier):
self.oauth_verifier = verifier
self._init_oauth_client()
@staticmethod
def process(response):
try:
response.json = json.loads(response.content)
except Exception:
response.json = None
finally:
return response
def inject_oauth_headers(self, headers, method):
if self.oauth_client and self.oauth_uri:
uri, oauth_headers, body = self.oauth_client.sign(self.oauth_uri,
http_method=unicode(method))
headers = dict(headers)
for k, v in oauth_headers.items():
headers['HTTP_' + k.upper()] = v
return headers
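    # Note: Django's test client forwards extra request headers as WSGI-style
    # 'HTTP_*' keys, which is why the signed OAuth headers above are injected
    # with that prefix.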
def get(self, url_name, data={}, follow=False, extra={}, *args, **kwargs):
return self.process(
super(OAuthTestClient, self).get(
reverse(url_name, args=args, kwargs=kwargs),
data=data,
follow=follow,
**self.inject_oauth_headers(extra, 'GET')))
def post(self, url_name, data={}, follow=False, extra={}, *args, **kwargs):
return self.process(
super(OAuthTestClient, self).post(
reverse(url_name, args=args, kwargs=kwargs),
data=data,
follow=follow,
**self.inject_oauth_headers(extra, 'POST')))
def put(self, url_name, data={}, follow=False, extra={}, *args, **kwargs):
return self.process(
super(OAuthTestClient, self).put(
reverse(url_name, args=args, kwargs=kwargs),
data=data, follow=follow,
**self.inject_oauth_headers(extra, 'PUT')))
    def delete(self, url_name, data={}, follow=False, extra={},
               content_type='application/octet-stream', *args, **kwargs):
        # content_type was previously referenced but never defined; the default
        # here mirrors Django's own test client signature.
        return self.process(
            super(OAuthTestClient, self).delete(
                reverse(url_name, args=args, kwargs=kwargs),
                content_type=content_type, data=data, follow=follow,
                **self.inject_oauth_headers(extra, 'DELETE')))
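# Minimal usage sketch for OAuthTestClient (mirrors the tests below):
#   tc = OAuthTestClient()
#   tc.set_client_key('CLIENTKEY', 'CLIENTSECRET')
#   tc.set_uri('http://localhost/oauth/request_token')
#   r = tc.post('oauth_get_request_token')   # request is signed with OAuth1 headers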
class OAuthViewTest(TestCase):
def clean(self):
User.objects.all().delete()
OAuthClient.objects.all().delete()
OAuthRequestToken.objects.all().delete()
OAuthAccessToken.objects.all().delete()
OAuthVerifier.objects.all().delete()
OAuthNonce.objects.all().delete()
def setUp(self):
self.clean()
s = Site.objects.get(id=1)
s.domain = 'localhost'
s.save()
Site.objects.clear_cache()
self.client_key = u'CLIENTKEYCLIENTKEYCLIENTKEY'
self.client_secret = u'CLIENTSECRETCLIENTSECRET'
self.user = User.objects.create_user(username='foo', password='<PASSWORD>')
self.client = OAuthClient.objects.create(key=self.client_key,
secret=self.client_secret)
self.testclient = OAuthTestClient()
def tearDown(self):
self.clean()
def test_returns_bad_req_if_no_oauth_signature(self):
tc = OAuthTestClient()
r = tc.post('oauth_get_request_token')
self.assertEqual(r.status_code, 400)
def test_returns_bad_req_if_invalid_client_key(self):
tc = OAuthTestClient()
tc.set_client_key('foo', 'bar')
tc.set_uri('http://localhost/oauth/request_token')
r = tc.post('oauth_get_request_token')
self.assertEqual(r.status_code, 400)
self.assertTrue('client key' in r.json.get('error', ''))
def test_returns_bad_req_if_nonexistent_client(self):
tc = OAuthTestClient()
tc.set_client_key(u'CLIENTKEYDOESNTEXIST',
u'CLIENTSECRETDOESNTEXIST')
tc.set_uri('http://localhost/oauth/request_token')
r = tc.post('oauth_get_request_token')
self.assertEqual(r.status_code, 400)
self.assertTrue('client key' in r.json.get('error', ''))
def test_returns_unauthorized_on_uri_mismatch(self):
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_uri('http://localhost/incorrect/uri')
r = tc.post('oauth_get_request_token')
self.assertEqual(r.status_code, 401)
def test_get_request_token_succeeds(self):
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_uri('http://localhost/oauth/request_token')
r = tc.post('oauth_get_request_token')
self.assertEqual(r.status_code, 200)
self.assertTrue(OAuthRequestToken.objects.filter(
token=r.json['oauth_token'],
secret=r.json['oauth_token_secret']).exists())
def test_returns_unauthorized_if_request_replay_attempted(self):
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_uri('http://localhost/oauth/request_token')
r = tc.post('oauth_get_request_token')
self.assertEqual(r.status_code, 200)
tc2 = OAuthTestClient()
r2 = tc2.post('oauth_get_request_token',
extra={'HTTP_AUTHORIZATION': r.request['HTTP_AUTHORIZATION']})
self.assertEqual(r2.status_code, 401)
def test_get_access_token_succeeds(self):
request_token = OAuthRequestToken.generate(self.client)
verifier = OAuthVerifier.generate(self.user, request_token)
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_access_token(request_token.token, request_token.secret)
tc.set_verifier(verifier.verifier)
tc.set_uri('http://localhost/oauth/access_token')
r = tc.post('oauth_get_access_token')
self.assertEqual(r.status_code, 200)
self.assertTrue(OAuthAccessToken.objects.filter(
token=r.json['oauth_token'],
secret=r.json['oauth_token_secret']).exists())
def test_get_access_token_returns_bad_req_on_invalid_verifier(self):
request_token = OAuthRequestToken.generate(self.client)
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_access_token(request_token.token, request_token.secret)
tc.set_verifier(u'INVALIDVERIFIERKEY')
tc.set_uri('http://localhost/oauth/access_token')
r = tc.post('oauth_get_access_token')
self.assertEqual(r.status_code, 400)
self.assertTrue('verifier' in r.json.get('error', ''))
def test_get_access_token_returns_bad_req_on_invalid_request_token(self):
request_token = OAuthRequestToken.generate(self.client)
verifier = OAuthVerifier.generate(self.user, request_token)
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_access_token(u'INVALIDREQUESTTOKEN', u'INVALIDREQUESTSECRET')
tc.set_verifier(verifier.verifier)
tc.set_uri('http://localhost/oauth/access_token')
r = tc.post('oauth_get_access_token')
self.assertEqual(r.status_code, 400)
self.assertTrue('resource owner key' in r.json.get('error', ''))
def test_access_protected_resource_fails_without_oauth(self):
tc = OAuthTestClient()
tc.set_uri('http://localhost/secret/')
r = tc.get('protected_endpoint')
self.assertEqual(r.status_code, 403)
def test_access_protected_resource_succeeds_with_oauth(self):
token = OAuthAccessToken.generate(self.user, self.client)
tc = OAuthTestClient()
tc.set_client_key(self.client_key, self.client_secret)
tc.set_access_token(token.token, token.secret)
tc.set_uri('http://localhost/secret/')
r = tc.get('protected_endpoint')
self.assertEqual(r.status_code, 200)
self.assertTrue(r.json['success'])
class RequestsInteropTest(LiveServerTestCase):
def clean(self):
User.objects.all().delete()
OAuthClient.objects.all().delete()
OAuthRequestToken.objects.all().delete()
OAuthAccessToken.objects.all().delete()
OAuthVerifier.objects.all().delete()
OAuthNonce.objects.all().delete()
def setUp(self):
self.clean()
s = Site.objects.get(id=1)
s.domain = urlparse.urlparse(self.live_server_url).netloc
s.save()
Site.objects.clear_cache()
self.client_key = u'CLIENTKEYCLIENTKEYCLIENTKEY'
self.client_secret = u'CLIENTSECRETCLIENTSECRET'
self.user = User.objects.create_user(username='foo', password='<PASSWORD>')
self.client = OAuthClient.objects.create(key=self.client_key,
secret=self.client_secret)
self.testclient = OAuthTestClient()
def url_for(self, url_name, *args, **kwargs):
return '%s%s' % (self.live_server_url,
reverse(url_name, args=args, kwargs=kwargs))
def tearDown(self):
self.clean()
def test_get_request_token_params_in_query(self):
hook = OAuthHook(consumer_key=self.client_key,
consumer_secret=self.client_secret)
r = requests.post(self.url_for('oauth_get_request_token'),
hooks={'pre_request': hook})
self.assertEqual(r.status_code, 200)
self.assertTrue(OAuthRequestToken.objects.filter(
token=r.json['oauth_token'],
secret=r.json['oauth_token_secret']).exists())
def test_get_request_token_params_in_headers(self):
hook = OAuthHook(consumer_key=self.client_key,
consumer_secret=self.client_secret, header_auth=True)
r = requests.post(self.url_for('oauth_get_request_token'),
hooks={'pre_request': hook})
self.assertEqual(r.status_code, 200)
self.assertTrue(OAuthRequestToken.objects.filter(
token=r.json['oauth_token'],
secret=r.json['oauth_token_secret']).exists())
def test_get_request_token_params_in_body(self):
hook = OAuthHook(consumer_key=self.client_key,
consumer_secret=self.client_secret)
r = requests.post(self.url_for('oauth_get_request_token'),
hooks={'pre_request': hook}, data='foo=bar', headers={
'Content-Type': 'application/x-www-form-urlencoded'
}, config={'max_retries': 0})
self.assertEqual(r.status_code, 200)
self.assertTrue(OAuthRequestToken.objects.filter(
token=r.json['oauth_token'],
secret=r.json['oauth_token_secret']).exists())
def test_get_access_token_succeeds(self):
request_token = OAuthRequestToken.generate(self.client)
verifier = OAuthVerifier.generate(self.user, request_token)
hook = OAuthHook(request_token.token, request_token.secret,
self.client_key, self.client_secret, header_auth=True)
r = requests.post(self.url_for('oauth_get_access_token'),
hooks={'pre_request': hook}, data={
'oauth_verifier': verifier.verifier,
}, config={'max_retries': 0})
self.assertEqual(r.status_code, 200)
self.assertTrue(OAuthAccessToken.objects.filter(
token=r.json['oauth_token'],
secret=r.json['oauth_token_secret']).exists())
def test_access_protected_resource_succeeds(self):
token = OAuthAccessToken.generate(self.user, self.client)
hook = OAuthHook(token.token, token.secret,
self.client_key, self.client_secret, header_auth=True)
r = requests.get(self.url_for('protected_endpoint'),
hooks={'pre_request': hook}, config={'max_retries': 0})
self.assertEqual(r.status_code, 200)
self.assertTrue(r.json['success'])
| StarcoderdataPython |
1769667 | # -*- coding: utf-8 -*-
#
# This file is part of SplashSync Project.
#
# Copyright (C) 2015-2020 Splash Sync <www.splashsync.com>
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import base64
from pathlib import Path
from splashpy.client import SplashClient
class Files():
"""Various Function to Work with Files"""
@staticmethod
def getFile(file, md5):
"""Read a file from Splash Server"""
# ====================================================================#
# Initiate File Request Contents
request = {
"tasks": {
"task": {
"id": 1,
"name": "ReadFile",
"desc": "Read file",
"params": {"file": file, "md5": md5},
}
}
}
# ====================================================================#
# Execute Task
response = SplashClient.getInstance().file(request)
# ====================================================================#
# Verify Response
if response is False:
return None
if "result" not in response or response["result"] is not "1":
return None
try:
task = response["tasks"]['task']
return task["data"]
except Exception:
return None
@staticmethod
def getAssetsPath():
import os
base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
return base_path + "/assets"
@staticmethod
def getRawContents(path):
if not Path(path).exists():
return ""
with open(path, 'rb') as file:
return str(base64.b64encode(file.read()), "UTF-8")
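# Illustrative usage (file name is hypothetical):
#   encoded = Files.getRawContents(Files.getAssetsPath() + "/logo.png")
#   # returns a base64-encoded string, or "" when the path does not exist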
| StarcoderdataPython |
3243838 | <filename>client/tc/azext_tc/_completers.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.decorators import Completer
from knack.log import get_logger
from ._client_factory import teamcloud_client_factory
logger = get_logger(__name__)
def _ensure_base_url(client, base_url):
client._client._base_url = base_url # pylint: disable=protected-access
@Completer
def get_org_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = teamcloud_client_factory(cmd.cli_ctx)
_ensure_base_url(client, namespace.base_url)
result = client.get_organizations()
try:
return [p.displayName for p in result.data]
except AttributeError:
return []
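# Note: this completer is expected to be attached to a command argument elsewhere
# in the extension (e.g. via a completer= option during argument registration);
# only the completion callback itself is defined here.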
| StarcoderdataPython |
3204749 | import asyncio
import importlib
import json
import logging
import os
import pprint
import re
import sys
import time
import docker
import netaddr
import netifaces
import sh
import tornado.httpclient
from wotemu.enums import Labels
_CGROUP_PATH = "/proc/self/cgroup"
_STACK_NAMESPACE = "com.docker.stack.namespace"
_CID_HOST_LEN = 12
_STATE_RUNNING = "running"
_logger = logging.getLogger(__name__)
class NodeHTTPTimeout(Exception):
pass
async def _ping_catalogue(catalogue_url, thing_ids=None):
thing_ids = thing_ids or []
http_client = tornado.httpclient.AsyncHTTPClient()
try:
catalogue_res = await http_client.fetch(catalogue_url)
catalogue = json.loads(catalogue_res.body)
assert all(thing_id in catalogue for thing_id in thing_ids)
_logger.debug("Catalogue ping OK: %s", catalogue_url)
return True
except Exception as ex:
_logger.debug("Catalogue ping error (%s): %s", catalogue_url, repr(ex))
return False
finally:
http_client.close()
async def _ping_catalogue_timeout(catalogue_url, wait, timeout, thing_ids=None):
_logger.debug("Waiting for catalogue:\n%s", pprint.pformat({
"catalogue_url": catalogue_url,
"wait": wait,
"timeout": timeout,
"thing_ids": thing_ids
}))
ini = time.time()
def _raise_timeout():
if timeout is None:
return
diff = time.time() - ini
if diff >= timeout:
raise NodeHTTPTimeout(
f"HTTP timeout ({timeout} s): {catalogue_url}")
while True:
_raise_timeout()
if (await _ping_catalogue(catalogue_url, thing_ids=thing_ids)):
break
_raise_timeout()
await asyncio.sleep(wait)
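# Illustrative call (values are made up): poll every 2 s, give up after 120 s
#   await _ping_catalogue_timeout("http://node-a:9090", wait=2, timeout=120,
#                                 thing_ids=["urn:wotemu:thing-a"])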
async def wait_node(conf, name, wait=2, timeout=120, find_replicas=True, thing_ids=None):
cont_hosts = [name]
if find_replicas:
_logger.debug((
"Attempting to translate service name '%s' "
"to the container hostnames of all the "
"replicas for that service"
), name)
try:
cont_hosts = get_service_container_hostnames(
docker_url=conf.docker_proxy_url,
name=name)
except Exception as ex:
_logger.warning("Error finding container hostnames: %s", ex)
_logger.warning("Using untranslated service name: %s", cont_hosts)
catalogue_urls = [
"http://{}:{}".format(host, conf.port_catalogue)
for host in cont_hosts
]
_logger.debug("Catalogue URLs: %s", catalogue_urls)
ping_awaitables = [
_ping_catalogue_timeout(
catalogue_url=url,
wait=wait,
timeout=timeout,
thing_ids=thing_ids)
for url in catalogue_urls
]
await asyncio.gather(*ping_awaitables)
def _find_service_container_hosts(docker_api_client, service_name):
task_filters = {
"service": service_name,
"desired-state": _STATE_RUNNING
}
_logger.debug("Filtering Docker tasks using filters: %s", task_filters)
try:
service_tasks = docker_api_client.tasks(filters=task_filters)
except Exception as ex:
_logger.warning(
"Error finding Docker tasks (filters: %s): %s",
task_filters, ex)
return []
_logger.debug(
"Found %s tasks for service: %s",
len(service_tasks), service_name)
return [
task["Status"]["ContainerStatus"]["ContainerID"][:_CID_HOST_LEN]
for task in service_tasks
]
def get_service_container_hostnames(docker_url, name):
docker_api_client = docker.APIClient(base_url=docker_url)
_logger.debug("Finding container hostnames for: %s", name)
service_parts = name.split(".")
try:
network_candidate = service_parts[-1]
docker_api_client.inspect_network(network_candidate)
_logger.debug("Found network: %s", network_candidate)
base_name = ".".join(service_parts[:-1])
except docker.errors.NotFound:
_logger.debug("Network not found: %s", network_candidate)
base_name = name
namespace = get_current_stack_namespace(docker_url)
service_names = [f"{namespace}_" + base_name]
if base_name.startswith(f"{namespace}_"):
service_names.append(base_name)
ret = [
_find_service_container_hosts(
docker_api_client=docker_api_client,
service_name=service_name)
for service_name in service_names
]
ret = [host for item in ret for host in item]
if not len(ret):
raise Exception("Could not find container hostnames for: %s", name)
_logger.debug("Service %s container hostnames: %s", name, ret)
return ret
def ping_docker(docker_url):
try:
docker_client = docker.DockerClient(base_url=docker_url)
docker_client.ping()
except Exception as ex:
raise Exception("Could not ping Docker daemon: {}".format(ex))
def get_current_container_id():
try:
with open(_CGROUP_PATH, "r") as fh:
cgroup = fh.read()
except FileNotFoundError as ex:
raise Exception((
"The current environment does not "
"seem to be a Docker container ({})"
).format(ex))
cid_regex = r"\d+:.+:\/docker\/([a-zA-Z0-9]+)"
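    # A matching cgroup v1 entry looks like (container ID shortened for illustration):
    #   "12:cpu,cpuacct:/docker/3f4e5a6b7c8d..."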
result = re.search(cid_regex, cgroup)
if not result or len(result.groups()) <= 0:
_logger.warning("Could not find container ID in:\n%s", cgroup)
raise Exception("Could not retrieve container ID")
cid = result.groups()[0]
_logger.debug("Current container ID: %s", cid)
return cid
def get_task_container_id(task_dict):
return task_dict.get("Status", {}).get("ContainerStatus", {}).get("ContainerID", None)
def get_current_task(docker_url):
docker_api_client = docker.APIClient(base_url=docker_url)
cid = get_current_container_id()
task = next((
task for task in docker_api_client.tasks()
if get_task_container_id(task) == cid), None)
if task is None:
raise Exception("Could not find task for container: {}".format(cid))
return task
def get_current_stack_namespace(docker_url):
curr_task = get_current_task(docker_url=docker_url)
return curr_task.get("Spec", {}).get("ContainerSpec", {}).get("Labels", {}).get(_STACK_NAMESPACE, None)
def get_task_networks(docker_url, task):
docker_api_client = docker.APIClient(base_url=docker_url)
network_ids = [
net["Network"]["ID"]
for net in task["NetworksAttachments"]
]
networks = {
net_id: docker_api_client.inspect_network(net_id)
for net_id in network_ids
}
networks = {
net_id: net_info for net_id, net_info in networks.items()
if net_info.get("Labels", {}).get(Labels.WOTEMU_NETWORK.value, None) is not None
}
return list(networks.keys())
def get_task_labels(docker_url, task_name):
docker_api_client = docker.APIClient(base_url=docker_url)
task_info = docker_api_client.inspect_task(task_name)
return task_info["Spec"]["ContainerSpec"]["Labels"]
def get_network_gateway_task(docker_url, network_id):
docker_api_client = docker.APIClient(base_url=docker_url)
network_info = docker_api_client.inspect_network(network_id, verbose=True)
service_infos = {
net_name: info
for net_name, info in network_info["Services"].items()
if len(net_name) > 0
}
_logger.debug(
"Network %s services:\n%s",
network_id,
pprint.pformat(list(service_infos.keys())))
task_infos = {
task_info["Name"]: task_info
for net_name, serv_info in service_infos.items()
for task_info in serv_info["Tasks"]
}
_logger.debug(
"Network %s tasks:\n%s",
network_id,
pprint.pformat(list(task_infos.keys())))
task_labels = {
task_name: get_task_labels(docker_url, task_name)
for task_name in task_infos.keys()
}
return next(
task_infos[task_name]
for task_name, labels in task_labels.items()
if labels.get(Labels.WOTEMU_GATEWAY.value, None) is not None)
def get_output_iface_for_task(net_task_dict):
task_name = net_task_dict["Name"]
task_addr = netaddr.IPAddress(net_task_dict["EndpointIP"])
iface_addrs = {
name: netifaces.ifaddresses(name).get(netifaces.AF_INET)
for name in netifaces.interfaces()
if netifaces.ifaddresses(name).get(netifaces.AF_INET)
}
_logger.debug(
"Current container interfaces:\n%s",
pprint.pformat(iface_addrs))
ret = next(
(iface_name, addr)
for iface_name, iface_addrs in iface_addrs.items()
for addr in iface_addrs
if task_addr in netaddr.IPNetwork("{}/{}".format(addr["addr"], addr["netmask"])))
_logger.debug("Output interface for %s: %s", task_name, ret)
return ret
def strip_ansi_codes(val):
"""Attribution to: https://stackoverflow.com/a/15780675"""
return re.sub(r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?', "", val)
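# Example (illustrative): strip_ansi_codes("\x1b[31mwarning\x1b[0m") returns "warning"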
def import_func(module_path, func_name):
_logger.debug("Attempting to import module: %s", module_path)
path_root, path_base = os.path.split(module_path)
if path_root not in sys.path:
sys.path.insert(0, path_root)
mod_name, _ext = os.path.splitext(path_base)
mod_import = importlib.import_module(mod_name)
mod_dir = dir(mod_import)
_logger.info("Imported: %s", mod_import)
_logger.debug("dir(%s): %s", mod_import, mod_dir)
if func_name not in mod_dir:
raise Exception("Module {} does not contain function '{}'".format(
mod_import, func_name))
return getattr(mod_import, func_name)
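# Illustrative usage (path and function name are hypothetical):
#   handler = import_func("/opt/app/user_code.py", "main")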
async def consume_from_catalogue(wot, port_catalogue, servient_host, thing_id):
http_client = tornado.httpclient.AsyncHTTPClient()
cat_url = "http://{}:{}".format(servient_host, port_catalogue)
_logger.debug("Fetching catalogue: %s", cat_url)
catalogue_res = await http_client.fetch(cat_url)
catalogue = json.loads(catalogue_res.body)
_logger.debug("Catalogue:\n%s", pprint.pformat(catalogue))
if thing_id not in catalogue:
raise Exception(f"Thing '{thing_id}' not in catalogue: {cat_url}")
td_url = "http://{}:{}/{}".format(
servient_host,
port_catalogue,
catalogue[thing_id].strip("/"))
_logger.debug("Consuming from URL: %s", td_url)
return await wot.consume_from_url(td_url)
def cgget(name):
try:
sh_cgget = sh.Command("cgget")
cmd_parts = ["-v", "-r", name, "/"]
proc = sh_cgget(cmd_parts, _err_to_out=True)
_logger.debug("%s: %s", proc.ran, proc.stdout)
match = re.search(r"(-?\d+)\n", proc.stdout.decode("utf8"))
return int(match.group(1)) if match else None
    except Exception:
        _logger.warning("Error running cgget for: %s", name, exc_info=True)
return None
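# Illustrative usage (requires the 'cgget' binary from libcgroup-tools):
#   cpu_shares = cgget("cpu.shares")  # returns an int, or None when unavailable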
| StarcoderdataPython |
31224 | <reponame>sundayliu/flask-tutorial
# -*- coding:utf-8 -*-
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views, errors
from ..models import Permission
@main.app_context_processor
def inject_permissions():
    return dict(Permission=Permission)
| StarcoderdataPython |
9812 | <reponame>yaosir0317/my_first
from enum import Enum
import requests
class MusicAPP(Enum):
qq = "qq"
wy = "netease"
PRE_URL = "http://www.musictool.top/"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"}
def get_music_list(name, app, page=1):
data = {"input": name, "filter": "name", "type": app, "page": page}
resp = requests.post(url=PRE_URL, headers=headers, data=data)
print(resp.text)
print(resp.json())
if __name__ == '__main__':
get_music_list("画", MusicAPP.qq)
| StarcoderdataPython |
3369738 | from src.contexts.kms.clients.domain.entities.ClientId import ClientId
from src.contexts.kms.cryptokeys.domain.entities.CryptoKey import CryptoKey
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyId import CryptoKeyId
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyIsMaster import CryptoKeyIsMaster
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyIsPrivate import CryptoKeyIsPrivate
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyParameters import CryptoKeyParameters
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyPayload import CryptoKeyPayload
from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyType import CryptoKeyType
from src.contexts.kms.cryptokeys.domain.repositories.CryptoKeyRepository import CryptoKeyRepository
from src.contexts.shared.domain.EventBus import EventBus
class CryptoKeyCreator:
def __init__(self, cryptokey_repository: CryptoKeyRepository, event_bus: EventBus):
self._cryptokey_repository = cryptokey_repository
self._event_bus = event_bus
async def run(self, cryptokey_id: CryptoKeyId, client_id: ClientId, cryptokey_type: CryptoKeyType,
payload: CryptoKeyPayload, parameters: CryptoKeyParameters, is_master: CryptoKeyIsMaster,
is_private: CryptoKeyIsPrivate):
cryptokey: CryptoKey = CryptoKey.create(cryptokey_id, client_id, cryptokey_type, payload, parameters, is_master,
is_private)
await self._cryptokey_repository.create_one(cryptokey)
await self._event_bus.publish(cryptokey.pull_domain_events())
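# The service above follows a common application-service pattern: build the aggregate,
# persist it through the repository, then publish the domain events it recorded.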
| StarcoderdataPython |
4805945 | #coding=utf-8
import requests
s=requests
###init
# data = {"labelData":"{'性别': [], '年龄': [], '病理诊断': ['腺鳞癌'], '病人批次': []}"}
data = {"labelData":"{'性别': ['男'], '年龄': ['40','50'], '病理诊断': [], '病人批次': ['2018-01-01','2018-01-10']}"}
# data = {"labelData": "{'性别': ['男'], '年龄': ['65'], '病理诊断': [], '病人批次': ['2018-01-01','2018-01-10']}"}
# data ={"labelData":"{'性别': [], '年龄': [], '病理诊断': [],'病人批次': []}"}
r=s.post('http://192.168.75.251:8686/label_search',data)
##interact
#data={"kaks_mean":"较高"}
## generate word
#data={"report_id":1}
#r=s.post('http://192.168.75.251:8686/generate_word',data)
#r=s.post('http://192.168.75.251:8686/report_interact',data)
print(r.text)
| StarcoderdataPython |
1659856 | from typing import List, Dict
from zhixuewang.teacher.urls import Url
from zhixuewang.teacher.models import TeaPerson
class Teacher(TeaPerson):
"""老师账号"""
def __init__(self, session):
super().__init__()
self._session = session
self.role = "teacher"
def set_base_info(self):
r = self._session.get(
Url.TEST_URL,
headers={
"referer":
"https://www.zhixue.com/container/container/teacher/index/"
})
json_data = r.json()["teacher"]
self.email = json_data.get("email")
self.gender = "男" if json_data["gender"] == "1" else "女"
self.id = json_data.get("id")
self.mobile = json_data.get("mobile")
self.name = json_data.get("name")
self.role = json_data["roles"][-1]
return self
def get_score(self, user_num, clazz_id, topicSetId):
r = self._session.get(
"https://www.zhixue.com/exportpaper/class/getExportStudentInfo/",
params={
"type": "allTopicUserNum",
"classId": clazz_id,
"studentNum": user_num,
"topicSetId": topicSetId,
"topicNumber": "0",
"startScore": "0",
"endScore": "0",
})
d = r.json()
return d.get("result")[0]["userScore"]
def get_topicSets(self, examId):
r = self._session.get(
f"https://www.zhixue.com/exportpaper/class/getSubjectChoice/?examId={examId}"
)
d = r.json()
return d["result"]
def get_class_score(self, classId, topicSetId) -> List[Dict[str, float]]:
r = self._session.get(
"https://www.zhixue.com/exportpaper/class/getExportStudentInfo/",
params={
"type": "export_single_paper_zip",
"classId": classId,
"studentNum": "",
"topicSetId": topicSetId,
"topicNumber": "0",
"startScore": "0",
"endScore": "10000",
})
d = r.json()
return [{
"name": i["userName"],
"score": i["userScore"],
} for i in d["result"]]
class Headmaster(Teacher): # 校长
def __init__(self, session):
super().__init__(session)
self.role = "headmaster"
class Headteacher(Teacher): # 年级主任 / 班主任
def __init__(self, session):
super().__init__(session)
self.role = "headteacher"
| StarcoderdataPython |
91066 | import numpy as np
import scipy as sp
from scipy.linalg import cho_factor, cho_solve
import time
start_time = time.time()
#float_formatter = '{:.4f}'.format
#np.set_printoptions(formatter={'float_kind':float_formatter})
N = 1000
print('N: ', N)
#Filling N*N array to initialize it
A1 = np.zeros((N,N), float)
A2 = np.zeros((N,N), float)
b1 = np.zeros((N,1), float)
b2 = np.ones((N,1), float)
#Filling arrays with the correspondant values
np.fill_diagonal(A1, 6)
np.fill_diagonal(A1[1:], -4)
np.fill_diagonal(A1[:, 1:], -4)
np.fill_diagonal(A1[2:], 1)
np.fill_diagonal(A1[:, 2:], 1)
np.fill_diagonal(A2, 7)
np.fill_diagonal(A2[1:], -4)
np.fill_diagonal(A2[:, 1:], -4)
np.fill_diagonal(A2[2:], 1)
np.fill_diagonal(A2[:, 2:], 1)
b1[0] = 3
b1[1] = -1
b1[-2] = -1
b1[-1] = 3
b2[0] = 4
b2[1] = 0
b2[-2] = 0
b2[-1] = 4
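# Both systems below are solved with a Cholesky factorization (cho_factor/cho_solve),
# which relies on A1 and A2 being symmetric positive definite.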
A, low = cho_factor(A1)
x = cho_solve((A, low), b1)
print('A1 x = b1 \n Ten median x are:')
ml = len(x) // 2 - 5
mu = len(x) // 2 + 5
print(x[ml : mu])
A, low = cho_factor(A2)
x = cho_solve((A, low), b2)
print('A2 x = b2 \n Ten median x are:')
ml = len(x) // 2 - 5
mu = len(x) // 2 + 5
print(x[ml : mu])
print("--- %s seconds ---" % (time.time() - start_time)) | StarcoderdataPython |
1628005 | <filename>net.py
# -*- coding: utf-8 -*-
import sys
sys.path.append('./lib')
import theano
theano.config.on_unused_input = 'warn'
import theano.tensor as T
import numpy as np
from layers import Weight, DataLayer, ConvPoolLayer, DropoutLayer, FCLayer, MaxoutLayer
def cosine(x, y, epsilon=np.array(1e-6).astype(np.float32)):
norm_x = T.sqrt(T.sum(x ** 2, 1)) + epsilon
norm_y = T.sqrt(T.sum(y ** 2, 1)) + epsilon
return T.sum(x * y, 1) / (norm_x * norm_y)
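# 'cosine' above computes the row-wise cosine similarity between two batches of
# vectors; epsilon keeps the denominators away from zero for all-zero rows.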
class AlexNet(object):
def __init__(self, config):
self.config = config
batch_size = config['batch_size']
n_images = config['n_images']
# ##################### BUILD NETWORK ##########################
# allocate symbolic variables for the data
# 'rand' is a random array used for random cropping/mirroring of data
xquery = T.ftensor4('xquery') # Trying to find colour variant of this image
xp = T.ftensor4('xp') # Correct colour variant image
xns = [] # Non-variant images
for i in xrange(n_images-1):
xns.append( T.ftensor4('xn'+str(i+1)) )
rands = []
for i in xrange(n_images+1):
rands.append( T.fvector('rand'+str(i)) )
layers, params, weight_types = [], [], []
print '... building the model'
# Get the representations of all input images
query_repr, query_layers, query_params, query_weight_types = \
self.image_repr(xquery, rands[0], config)
layers += query_layers
params += query_params
weight_types += query_weight_types
p_repr, p_layers, p_params, p_weight_types = \
self.image_repr(xp, rands[1], config)
layers += p_layers
params += p_params
weight_types += p_weight_types
n_reprs = []
for i in xrange(n_images-1):
n_repr, n_layers, n_params, n_weight_types = \
self.image_repr(xns[i], rands[i+2], config)
n_reprs.append( n_repr )
layers += n_layers
params += n_params
weight_types += n_weight_types
# Compute cosine distance from query image to target images
sims_ = []
sims_.append( cosine(query_repr.output,
p_repr.output ).dimshuffle(0,'x') )
for i in xrange(n_images-1):
sims_.append( cosine(query_repr.output,
n_reprs[i].output ).dimshuffle(0,'x') )
sims = T.concatenate(sims_, axis=1)
#sims = T.concatenate([ sims[:,1].dimshuffle(0,'x'), sims[:,0].dimshuffle(0,'x') ], axis=1)
# Temp: Permute location of correct colour variant, to check that improvements are real
#rng = T.shared_randomstreams.RandomStreams(12345)
#perm = rng.permutation(size=(sims.shape[0],), n=2)
#sims2 = T.concatenate([ sims[T.arange(sims.shape[0]),perm[:,0]].dimshuffle(0,'x'),
# sims[T.arange(sims.shape[0]),perm[:,1]].dimshuffle(0,'x') ], axis=1)
#index_of_variant = T.argmin(perm, axis=1)
# Compute probabilities
p_y_given_x = T.nnet.softmax(sims)
cost = -T.mean(T.log(p_y_given_x[0, :]))
y_pred = T.argmax(p_y_given_x, axis=1)
errors = T.neq(y_pred, 0) # index_of_variant) # 0)
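        # The matching colour variant always occupies column 0 of 'sims', so 'errors'
        # counts batches where the argmax picked any other candidate image.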
# #################### NETWORK BUILT #######################
self.testfunc = query_repr.output.shape # sims # errors # T.extra_ops.bincount(y_pred)
self.cost = cost
self.errors = T.mean(errors)
self.errors_top_5 = None
self.xquery = xquery
self.xp = xp
self.xns = xns
self.rands = rands
self.layers = layers
self.params = params
self.weight_types = weight_types
self.batch_size = batch_size
self.n_images = n_images
def image_repr(self, x, rand, config):
batch_size = config['batch_size']
flag_datalayer = config['use_data_layer']
lib_conv = config['lib_conv']
layers = []
params = []
weight_types = []
if flag_datalayer:
data_layer = DataLayer(input=x, image_shape=(3, 256, 256,
batch_size),
cropsize=227, rand=rand, mirror=True,
flag_rand=config['rand_crop'])
layer1_input = data_layer.output
else:
layer1_input = x
convpool_layer1 = ConvPoolLayer(input=layer1_input,
image_shape=(3, 227, 227, batch_size),
filter_shape=(3, 11, 11, 96),
convstride=4, padsize=0, group=1,
poolsize=3, poolstride=2,
bias_init=0.0, lrn=True,
lib_conv=lib_conv,
)
layers.append(convpool_layer1)
params += convpool_layer1.params
weight_types += convpool_layer1.weight_type
convpool_layer2 = ConvPoolLayer(input=convpool_layer1.output,
image_shape=(96, 27, 27, batch_size),
filter_shape=(96, 5, 5, 256),
convstride=1, padsize=2, group=2,
poolsize=3, poolstride=2,
bias_init=0.1, lrn=True,
lib_conv=lib_conv,
)
layers.append(convpool_layer2)
params += convpool_layer2.params
weight_types += convpool_layer2.weight_type
convpool_layer3 = ConvPoolLayer(input=convpool_layer2.output,
image_shape=(256, 13, 13, batch_size),
filter_shape=(256, 3, 3, 384),
convstride=1, padsize=1, group=1,
poolsize=1, poolstride=0,
bias_init=0.0, lrn=False,
lib_conv=lib_conv,
)
layers.append(convpool_layer3)
params += convpool_layer3.params
weight_types += convpool_layer3.weight_type
convpool_layer4 = ConvPoolLayer(input=convpool_layer3.output,
image_shape=(384, 13, 13, batch_size),
filter_shape=(384, 3, 3, 384),
convstride=1, padsize=1, group=2,
poolsize=1, poolstride=0,
bias_init=0.1, lrn=False,
lib_conv=lib_conv,
)
layers.append(convpool_layer4)
params += convpool_layer4.params
weight_types += convpool_layer4.weight_type
convpool_layer5 = ConvPoolLayer(input=convpool_layer4.output,
image_shape=(384, 13, 13, batch_size),
filter_shape=(384, 3, 3, 256),
convstride=1, padsize=1, group=2,
poolsize=3, poolstride=2,
bias_init=0.0, lrn=False,
lib_conv=lib_conv,
)
layers.append(convpool_layer5)
params += convpool_layer5.params
weight_types += convpool_layer5.weight_type
fc_layer6_input = T.flatten(
convpool_layer5.output.dimshuffle(3, 0, 1, 2), 2)
fc_layer6 = MaxoutLayer(input=fc_layer6_input, n_in=9216, n_out=4096)
layers.append(fc_layer6)
params += fc_layer6.params
weight_types += fc_layer6.weight_type
dropout_layer6 = DropoutLayer(fc_layer6.output, n_in=4096, n_out=4096)
fc_layer7 = MaxoutLayer(input=dropout_layer6.output, n_in=4096, n_out=4096)
layers.append(fc_layer7)
params += fc_layer7.params
weight_types += fc_layer7.weight_type
#dropout_layer7 = DropoutLayer(fc_layer7.output, n_in=4096, n_out=4096)
# Rename weight types so that weights can be shared
new_weight_types = []
counter_W = 0
counter_b = 0
for w in weight_types:
if w == 'W':
new_weight_types.append('W'+str(counter_W))
counter_W += 1
elif w == 'b':
new_weight_types.append('b'+str(counter_b))
counter_b += 1
weight_types = new_weight_types
return fc_layer7, layers, params, weight_types
def compile_models(model, config, flag_top_5=False):
xquery = model.xquery
xp = model.xp
xns = model.xns
rands = model.rands
weight_types = model.weight_types
cost = model.cost
params = model.params
errors = model.errors
#errors_top_5 = model.errors_top_5
batch_size = model.batch_size
n_images = model.n_images
mu = config['momentum']
eta = config['weight_decay']
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
updates = []
learning_rate = theano.shared(np.float32(config['learning_rate']))
lr = T.scalar('lr') # symbolic learning rate
if config['use_data_layer']:
raw_size = 256
else:
raw_size = 227
shared_xquery = theano.shared(np.zeros((3, raw_size, raw_size,
batch_size),
dtype=theano.config.floatX),
borrow=True)
shared_xp = theano.shared(np.zeros((3, raw_size, raw_size,
batch_size),
dtype=theano.config.floatX),
borrow=True)
shared_xns = []
for i in xrange(len(xns)):
shared_xn = theano.shared(np.zeros((3, raw_size, raw_size,
batch_size),
dtype=theano.config.floatX),
borrow=True)
shared_xns.append( shared_xn )
rand_arrs = []
for i in xrange(n_images+1):
rand_arr = theano.shared(np.zeros(3, dtype=theano.config.floatX),
borrow=True)
rand_arrs.append( rand_arr )
vels = [theano.shared(param_i.get_value() * 0.)
for param_i in params]
assert len(weight_types) == len(params)
# Shared weights between all image networks
iter_indexes = []
for i in xrange(20):
W_indexes = []
b_indexes = []
for j in xrange(len(weight_types)):
weight_type = weight_types[j]
if weight_type == 'W'+str(i):
W_indexes.append(j)
elif weight_type == 'b'+str(i):
b_indexes.append(j)
if len(W_indexes)>0:
iter_indexes.append(W_indexes)
if len(b_indexes)>0:
iter_indexes.append(b_indexes)
if len(W_indexes)==0 and len(b_indexes)==0:
break
for indexes in iter_indexes:
index_i = indexes[0]
weight_type = weight_types[index_i][0]
param_i = params[index_i]
grad_i = grads[index_i]
vel_i = vels[index_i]
change_i = 0
if config['use_momentum']:
if weight_type == 'W':
real_grad = grad_i + eta * param_i
real_lr = lr
elif weight_type == 'b':
real_grad = grad_i
real_lr = 2. * lr
else:
raise TypeError("Weight Type Error")
if config['use_nesterov_momentum']:
change_i = mu ** 2 * vel_i - (1 + mu) * real_lr * real_grad
else:
change_i = mu * vel_i - real_lr * real_grad
else:
if weight_type == 'W':
change_i = - lr * grad_i - eta * lr * param_i
elif weight_type == 'b':
change_i = - 2 * lr * grad_i
else:
raise TypeError("Weight Type Error")
newval = param_i + change_i
for index in indexes:
param = params[index]
updates.append((param, newval))
if config['use_momentum']:
vel = vels[index]
updates.append((vel, change_i))
#if config['use_momentum']:
# for param_i, grad_i, vel_i, weight_type in \
# zip(params, grads, vels, weight_types):
# if weight_type == 'W':
# real_grad = grad_i + eta * param_i
# real_lr = lr
# elif weight_type == 'b':
# real_grad = grad_i
# real_lr = 2. * lr
# else:
# raise TypeError("Weight Type Error")
# if config['use_nesterov_momentum']:
# vel_i_next = mu ** 2 * vel_i - (1 + mu) * real_lr * real_grad
# else:
# vel_i_next = mu * vel_i - real_lr * real_grad
# updates.append((vel_i, vel_i_next))
# updates.append((param_i, param_i + vel_i_next))
#else:
# for param_i, grad_i, weight_type in zip(params, grads, weight_types):
# #weight_type = weight_type[0]
# if weight_type == 'W':
# updates.append((param_i,
# param_i - lr * grad_i - eta * lr * param_i))
# elif weight_type == 'b':
# updates.append((param_i, param_i - 2 * lr * grad_i))
# else:
# continue
# #raise TypeError("Weight Type Error")
# Define Theano Functions
givens = []
givens.append((lr, learning_rate))
givens.append((xquery, shared_xquery))
givens.append((xp, shared_xp))
for i in xrange(len(xns)):
givens.append((xns[i], shared_xns[i]))
for i in xrange(len(rands)):
givens.append((rands[i], rand_arrs[i]))
train_model = theano.function([], cost, updates=updates,
givens=givens)
validate_outputs = [cost, errors]
#if flag_top_5:
# validate_outputs.append(errors_top_5)
validate_model = theano.function([], validate_outputs, givens=givens)
train_error = theano.function([], errors, givens=givens[1:])
if model.testfunc is not None:
testfunc = theano.function([], model.testfunc, givens=givens)
else:
testfunc = None
#
# Metrics that can be logged to understand cnn better:
#
# Variance & mean of weight matrices at each layer
# Norm of weight matrices along each of their dimensions
# Mean & variance of intermediate representations after each layer
# - Also, mean & variance per class label
# Mean, variance, norm of gradient
# - norm of gradient should not exceed 5 or 15
# Ratio between the update norm and weight norm -> should be around 0.001
#
#
return (train_model, validate_model, train_error, learning_rate,
shared_xquery, shared_xp, shared_xns, rand_arrs, vels, testfunc)
| StarcoderdataPython |
1609583 | <gh_stars>1-10
import os
import pathlib
import shutil
import tempfile
import pytest
import torch
import torchtraining as tt
from torch.utils.tensorboard import SummaryWriter
@pytest.mark.parametrize(
"klass,inputs",
[
(tt.callbacks.tensorboard.Scalar, 15),
(tt.callbacks.tensorboard.Scalar, torch.tensor([15]),),
(tt.callbacks.tensorboard.Scalars, {"foo": 5, "bar": 10}),
(
tt.callbacks.tensorboard.Scalars,
{"foo": torch.tensor([5]), "bar": torch.tensor(10)},
),
(tt.callbacks.tensorboard.Histogram, torch.randint(0, 15, size=(10, 30))),
# Below need pillow
# (tt.callbacks.tensorboard.Image, torch.rand(3, 32, 32)),
# (tt.callbacks.tensorboard.Images, torch.rand(8, 3, 32, 32)),
# Below needs moviepy
# (tt.callbacks.tensorboard.Video, torch.rand(8, 6, 3, 32, 32)),
(tt.callbacks.tensorboard.Audio, torch.rand(1, 100)),
(tt.callbacks.tensorboard.Text, "example_text"),
],
)
def test_tensorboard(klass, inputs):
directory = pathlib.Path(tempfile.mkdtemp())
writer = SummaryWriter(directory)
tb_writer = klass(writer, "example")
tb_writer(inputs)
shutil.rmtree(directory, ignore_errors=True)
| StarcoderdataPython |
3262823 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import theseus as th
from theseus.utils.examples.bundle_adjustment.util import random_small_quaternion
def test_residual():
# unit test for Cost term
torch.manual_seed(0)
batch_size = 4
cam_rot = torch.cat(
[
random_small_quaternion(max_degrees=20).unsqueeze(0)
for _ in range(batch_size)
]
)
cam_tr = torch.rand((batch_size, 3), dtype=torch.float64) * 2 + torch.tensor(
[-1, -1, -5.0], dtype=torch.float64
)
cam_pose_data = torch.cat([cam_tr, cam_rot], dim=1)
cam_pose = th.SE3(cam_pose_data, name="cam_pose")
focal_length = th.Vector(
data=torch.tensor([1000], dtype=torch.float64).repeat(batch_size).unsqueeze(1),
name="focal_length",
)
calib_k1 = th.Vector(
data=torch.tensor([-0.1], dtype=torch.float64).repeat(batch_size).unsqueeze(1),
name="calib_k1",
)
calib_k2 = th.Vector(
data=torch.tensor([0.01], dtype=torch.float64).repeat(batch_size).unsqueeze(1),
name="calib_k2",
)
world_point = th.Vector(
data=torch.rand((batch_size, 3), dtype=torch.float64), name="worldPoint"
)
point_cam = cam_pose.transform_from(world_point).data
proj = -point_cam[:, :2] / point_cam[:, 2:3]
proj_sqn = (proj * proj).sum(dim=1).unsqueeze(1)
proj_factor = focal_length.data * (
1.0 + proj_sqn * (calib_k1.data + proj_sqn * calib_k2.data)
)
point_projection = proj * proj_factor
image_feature_point = th.Vector(
data=point_projection.data + (torch.rand((batch_size, 2)) - 0.5) * 50,
name="image_feature_point",
)
r = th.eb.Reprojection(
camera_pose=cam_pose,
world_point=world_point,
focal_length=focal_length,
calib_k1=calib_k1,
calib_k2=calib_k2,
image_feature_point=image_feature_point,
)
base_err = r.error()
base_camera_pose = r.camera_pose.copy()
base_world_point = r.world_point.copy()
n_err = base_err.shape[1]
pose_num_jac = torch.zeros((batch_size, n_err, 6), dtype=torch.float64)
epsilon = 1e-8
for i in range(6):
v = torch.zeros((batch_size, 6), dtype=torch.float64)
v[:, i] += epsilon
r.camera_pose = base_camera_pose.retract(v)
pert_err = r.error()
pose_num_jac[:, :, i] = (pert_err - base_err) / epsilon
r.camera_pose = base_camera_pose
wpt_num_jac = torch.zeros((batch_size, n_err, 3), dtype=torch.float64)
for i in range(3):
v = torch.zeros((batch_size, 3), dtype=torch.float64)
v[:, i] += epsilon
r.world_point = base_world_point.retract(v)
pert_err = r.error()
wpt_num_jac[:, :, i] = (pert_err - base_err) / epsilon
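    # The two loops above build forward-difference estimates of the Jacobians, which
    # are compared against the analytical Jacobians returned by r.jacobians() below.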
(pose_jac, wpt_jac), _ = r.jacobians()
assert torch.norm(pose_num_jac - pose_jac) < 5e-5
assert torch.norm(wpt_num_jac - wpt_jac) < 5e-5
| StarcoderdataPython |
3367579 | import pytest
from unittest import TestCase
from pyflamegpu import *
import random as rand
AGENT_COUNT = 2049
out_mandatory2D = """
FLAMEGPU_AGENT_FUNCTION(out_mandatory2D, flamegpu::MessageNone, flamegpu::MessageSpatial2D) {
FLAMEGPU->message_out.setVariable<int>("id", FLAMEGPU->getVariable<int>("id"));
FLAMEGPU->message_out.setLocation(
FLAMEGPU->getVariable<float>("x"),
FLAMEGPU->getVariable<float>("y"));
return flamegpu::ALIVE;
}
"""
out_optional2D = """
FLAMEGPU_AGENT_FUNCTION(out_optional2D, flamegpu::MessageNone, flamegpu::MessageSpatial2D) {
if (FLAMEGPU->getVariable<int>("do_output")) {
FLAMEGPU->message_out.setVariable<int>("id", FLAMEGPU->getVariable<int>("id"));
FLAMEGPU->message_out.setLocation(
FLAMEGPU->getVariable<float>("x"),
FLAMEGPU->getVariable<float>("y"));
}
return flamegpu::ALIVE;
}
"""
in2D = """
FLAMEGPU_AGENT_FUNCTION(in2D, flamegpu::MessageSpatial2D, flamegpu::MessageNone) {
const float x1 = FLAMEGPU->getVariable<float>("x");
const float y1 = FLAMEGPU->getVariable<float>("y");
unsigned int count = 0;
unsigned int badCount = 0;
unsigned int myBin[2] = {
static_cast<unsigned int>(x1),
static_cast<unsigned int>(y1)
};
// Count how many messages we received (including our own)
// This is all those which fall within the 3x3x3 Moore neighbourhood
// Not our search radius
for (const auto &message : FLAMEGPU->message_in(x1, y1)) {
unsigned int messageBin[2] = {
static_cast<unsigned int>(message.getVariable<float>("x")),
static_cast<unsigned int>(message.getVariable<float>("y"))
};
bool isBad = false;
for (unsigned int i = 0; i < 2; ++i) { // Iterate axis
int binDiff = myBin[i] - messageBin[i];
if (binDiff > 1 || binDiff < -1) {
isBad = true;
}
}
count++;
badCount = isBad ? badCount + 1 : badCount;
}
FLAMEGPU->setVariable<unsigned int>("count", count);
FLAMEGPU->setVariable<unsigned int>("badCount", badCount);
return flamegpu::ALIVE;
}
"""
count2D = """
FLAMEGPU_AGENT_FUNCTION(count2D, flamegpu::MessageSpatial2D, flamegpu::MessageNone) {
unsigned int count = 0;
// Count how many messages we received (including our own)
// This is all those which fall within the 3x3 Moore neighbourhood
for (const auto &message : FLAMEGPU->message_in(0, 0)) {
count++;
}
FLAMEGPU->setVariable<unsigned int>("count", count);
return flamegpu::ALIVE;
}
"""
class Spatial2DMessageTest(TestCase):
def test_Mandatory(self):
bin_counts = {}
# Construct model
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
# Location message
message = model.newMessageSpatial2D("location")
message.setMin(0, 0)
message.setMax(11, 11)
message.setRadius(1)
# 11x11 bins, total 121
message.newVariableInt("id") # unused by current test
# Circle agent
agent = model.newAgent("agent")
agent.newVariableInt("id")
agent.newVariableFloat("x")
agent.newVariableFloat("y")
agent.newVariableUInt("myBin") # This will be presumed bin index of the agent, might not use this
agent.newVariableUInt("count") # Store the distance moved here, for validation
agent.newVariableUInt("badCount") # Store how many messages are out of range
of = agent.newRTCFunction("out", out_mandatory2D)
of.setMessageOutput("location")
inf = agent.newRTCFunction("in", in2D)
inf.setMessageInput("location")
# Layer #1
layer = model.newLayer()
layer.addAgentFunction(of)
# Layer #2
layer = model.newLayer()
layer.addAgentFunction(inf)
cudaSimulation = pyflamegpu.CUDASimulation(model)
population = pyflamegpu.AgentVector(model.Agent("agent"), AGENT_COUNT)
# Initialise agents (TODO)
# Currently population has not been init, so generate an agent population on the fly
for i in range(AGENT_COUNT):
instance = population[i]
instance.setVariableInt("id", i)
pos = [rand.uniform(0.0, 11.0), rand.uniform(0.0, 11.0)]
instance.setVariableFloat("x", pos[0])
instance.setVariableFloat("y", pos[1])
# Solve the bin index
bin_pos = [int(pos[0]), int(pos[1])]
bin_index = bin_pos[1] * 11 + bin_pos[0]
instance.setVariableUInt("myBin", bin_index)
# Create it if it doesn't already exist
if not bin_index in bin_counts:
bin_counts[bin_index] = 0
# increment bin count
bin_counts[bin_index] += 1
cudaSimulation.setPopulationData(population)
# Generate results expectation
bin_results = {}
# Iterate host bin
for x1 in range(11):
for y1 in range(11):
# Solve the bin index
bin_pos1 = [x1, y1]
bin_index1 = bin_pos1[1] * 11 + bin_pos1[0]
# Count our neighbours
count_sum = 0
for x2 in range(-1, 2):
bin_pos2 = [bin_pos1[0] + x2, 0]
for y2 in range(-1, 2):
bin_pos2[1] = bin_pos1[1] + y2
# Ensure bin is in bounds
if (bin_pos2[0] >= 0 and
bin_pos2[1] >= 0 and
bin_pos2[0] < 11 and
bin_pos2[1] < 11):
bin_index2 = bin_pos2[1] * 11 + bin_pos2[0]
count_sum += bin_counts[bin_index2]
bin_results[bin_index1] = count_sum
# Execute a single step of the model
cudaSimulation.step()
# Recover the results and check they match what was expected
cudaSimulation.getPopulationData(population)
# Validate each agent has same result
badCountWrong = 0
for ai in population:
myBin = ai.getVariableUInt("myBin")
myResult = ai.getVariableUInt("count")
assert myResult == bin_results[myBin]
if ai.getVariableUInt("badCount"):
badCountWrong += 1
assert badCountWrong == 0
def test_Optional(self):
"""
        This test is the same as test_Mandatory, except an extra flag blocks some agents from outputting messages.
"""
bin_counts = {}
bin_counts_optional = {}
# Construct model
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
# Location message
message = model.newMessageSpatial2D("location")
message.setMin(0, 0)
message.setMax(11, 11)
message.setRadius(1)
# 11x11 bins, total 121
message.newVariableInt("id") # unused by current test
# Circle agent
agent = model.newAgent("agent")
agent.newVariableInt("id")
agent.newVariableFloat("x")
agent.newVariableFloat("y")
agent.newVariableInt("do_output")
agent.newVariableUInt("myBin") # This will be presumed bin index of the agent, might not use this
agent.newVariableUInt("count") # Store the distance moved here, for validation
agent.newVariableUInt("badCount") # Store how many messages are out of range
of = agent.newRTCFunction("out", out_optional2D)
of.setMessageOutput("location")
of.setMessageOutputOptional(True);
inf = agent.newRTCFunction("in", in2D)
inf.setMessageInput("location")
# Layer #1
layer = model.newLayer()
layer.addAgentFunction(of)
# Layer #2
layer = model.newLayer()
layer.addAgentFunction(inf)
cudaSimulation = pyflamegpu.CUDASimulation(model)
population = pyflamegpu.AgentVector(model.Agent("agent"), AGENT_COUNT)
# Initialise agents (TODO)
# Currently population has not been init, so generate an agent population on the fly
for i in range(AGENT_COUNT):
instance = population[i]
instance.setVariableInt("id", i)
pos = [rand.uniform(0.0, 11.0), rand.uniform(0.0, 11.0)]
if rand.uniform(0.0, 5.0) < 4.0: # 80% chance of output
do_output = 1
else:
do_output = 0
instance.setVariableFloat("x", pos[0])
instance.setVariableFloat("y", pos[1])
instance.setVariableInt("do_output", do_output)
# Solve the bin index
bin_pos = [int(pos[0]), int(pos[1])]
bin_index = bin_pos[1] * 11 + bin_pos[0]
instance.setVariableUInt("myBin", bin_index)
# Create key if it doesn't already exist
if not bin_index in bin_counts:
bin_counts[bin_index] = 0
bin_counts[bin_index] += 1
if (do_output) :
if not bin_index in bin_counts_optional:
bin_counts_optional[bin_index] = 0
bin_counts_optional[bin_index] += 1
cudaSimulation.setPopulationData(population)
# Generate results expectation
bin_results = {}
bin_results_optional = {}
# Iterate host bin
for x1 in range(11):
for y1 in range(11):
# Solve the bin index
bin_pos1 = [x1, y1]
bin_index1 = bin_pos1[1] * 11 + bin_pos1[0]
# Count our neighbours
count_sum = 0
count_sum_optional = 0
for x2 in range(-1, 2):
bin_pos2 = [bin_pos1[0] + x2, 0]
for y2 in range(-1, 2):
bin_pos2[1] = bin_pos1[1] + y2
# Ensure bin is in bounds
if (bin_pos2[0] >= 0 and
bin_pos2[1] >= 0 and
bin_pos2[0] < 11 and
bin_pos2[1] < 11):
bin_index2 = bin_pos2[1] * 11 + bin_pos2[0]
count_sum += bin_counts[bin_index2]
count_sum_optional += bin_counts_optional[bin_index2]
bin_results[bin_index1] = count_sum
bin_results_optional[bin_index1] = count_sum_optional
# Execute a single step of the model
cudaSimulation.step()
# Recover the results and check they match what was expected
cudaSimulation.getPopulationData(population)
# Validate each agent has same result
badCountWrong = 0
for ai in population:
myBin = ai.getVariableUInt("myBin")
myResult = ai.getVariableUInt("count")
#assert myResult == bin_results[myBin]
assert myResult == bin_results_optional[myBin]
if ai.getVariableUInt("badCount"):
badCountWrong += 1
assert badCountWrong == 0
def test_BadRadius(self):
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
message = model.newMessageSpatial2D("location")
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setRadius(0)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setRadius(-10)
assert e.value.type() == "InvalidArgument"
def test_BadMin(self):
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
message = model.newMessageSpatial2D("location")
message.setMax(5, 5)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMin(5, 0)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMin(0, 5)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMin(6, 0)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMin(0, 6)
assert e.value.type() == "InvalidArgument"
def test_BadMax(self):
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
message = model.newMessageSpatial2D("location")
message.setMin(5, 5)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMax(5, 0)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMax(0, 5)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMax(4, 0)
assert e.value.type() == "InvalidArgument"
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.setMax(0, 4)
assert e.value.type() == "InvalidArgument"
def test_UnsetMax(self):
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
message = model.newMessageSpatial2D("location")
message.setMin(5, 5)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
m = pyflamegpu.CUDASimulation(model)
assert e.value.type() == "InvalidMessage"
def test_UnsetMin(self):
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
message = model.newMessageSpatial2D("location")
message.setMin(5, 5)
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
m = pyflamegpu.CUDASimulation(model)
assert e.value.type() == "InvalidMessage"
def test_reserved_name(self):
model = pyflamegpu.ModelDescription("Spatial2DMessageTestModel")
message = model.newMessageSpatial2D("location")
with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:
message.newVariableInt("_")
assert e.value.type() == "ReservedName"
def test_ReadEmpty(self):
# What happens if we read a message list before it has been output?
model = pyflamegpu.ModelDescription("Model")
# Location message
message = model.newMessageSpatial2D("location")
message.setMin(-3, -3)
message.setMax(3, 3)
message.setRadius(2)
message.newVariableInt("id") # unused by current test
# Circle agent
agent = model.newAgent("agent")
agent.newVariableUInt("count", 0) # Count the number of messages read
fin = agent.newRTCFunction("in", count2D)
fin.setMessageInput("location")
# Layer #1
layer = model.newLayer()
layer.addAgentFunction(fin)
# Create 1 agent
pop_in = pyflamegpu.AgentVector(model.Agent("agent"), 1)
cudaSimulation = pyflamegpu.CUDASimulation(model)
cudaSimulation.setPopulationData(pop_in)
# Execute model
cudaSimulation.step()
# Check result
pop_out = pyflamegpu.AgentVector(model.Agent("agent"), 1)
pop_out[0].setVariableUInt("count", 1)
cudaSimulation.getPopulationData(pop_out)
assert len(pop_out) == 1
ai = pop_out[0]
assert ai.getVariableUInt("count") == 0 | StarcoderdataPython |
19336 | import gpsd
import json
import logging
import socket
import httpx
import paho.mqtt.client as mqtt
class MQTTReporter:
def __init__(self, name, mqtt_server=None, gps_server=None, compass=False):
self.name = name
self.mqtt_server = mqtt_server
self.compass = compass
self.gps_server = gps_server
self.mqttc = None
self.bearing = 'no bearing'
def connect(self):
logging.info(f'connecting to {self.mqtt_server}')
self.mqttc = mqtt.Client()
self.mqttc.connect(self.mqtt_server)
self.mqttc.loop_start()
if self.gps_server:
gpsd.connect(host=self.gps_server, port=2947)
def get_bearing(self):
try:
self.bearing = str(float(httpx.get(f'http://{self.gps_server}:8000/v1/').text))
except Exception as err:
logging.error('could not update bearing: %s', err)
def add_gps(self, publish_args):
if not self.gps_server:
return publish_args
publish_args.update({
'position': [0, 0],
'altitude': None,
'gps_time': None,
'map_url': None,
'bearing': self.bearing,
'gps': 'no fix'})
try:
if self.compass:
self.get_bearing()
packet = gpsd.get_current()
publish_args.update({
'position': packet.position(),
'altitude': packet.altitude(),
'gps_time': packet.get_time().timestamp(),
'map_url': packet.map_url(),
'bearing': self.bearing,
'gps': 'fix'})
except (gpsd.NoFixError, AttributeError) as err:
logging.error('could not update with GPS: %s', err)
return publish_args
def publish(self, publish_path, publish_args):
if not self.mqtt_server:
return
try:
if self.mqttc is None:
self.connect()
publish_args = self.add_gps(publish_args)
publish_args['name'] = self.name
self.mqttc.publish(publish_path, json.dumps(publish_args))
except (socket.gaierror, ConnectionRefusedError, mqtt.WebsocketConnectionError, ValueError) as err:
logging.error(f'failed to publish to MQTT {self.mqtt_server}: {err}')
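# Minimal usage sketch (illustrative only; host names and topic are assumptions):
#   reporter = MQTTReporter('scanner1', mqtt_server='mqtt.example.local',
#                           gps_server='gps.example.local', compass=True)
#   reporter.publish('sensors/scan', {'center_freq': 100e6, 'power_dbm': -42.0})
# publish() lazily connects on first use and silently returns if no mqtt_server
# was configured.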
| StarcoderdataPython |
160743 | #!/usr/bin/env python3
import os
import json
import torch
from misc_scripts import run_cl_exp, run_rep_exp
from utils import get_mini_imagenet, get_omniglot
from core_functions.vision import evaluate
from core_functions.vision_models import OmniglotCNN, MiniImagenetCNN, ConvBase
from core_functions.maml import MAML
cuda = True
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h53_1_1434"
meta_test = False
eval_iters = False
cl_exp = False
rep_exp = True
cl_params = {
"adapt_steps": 1,
"inner_lr": 0.5,
"n_tasks": 5
}
rep_params = {
"adapt_steps": 1,
"inner_lr": 0.5,
"n_tasks": 5,
"layers": [1, 2, 3, 4, -1], # MIN
# "layers": [2, 4] # Omni
}
class Lambda(torch.nn.Module):
def __init__(self, fn):
super(Lambda, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
def run(path):
# Initialize
with open(path + "/logger.json", "r") as f:
params = json.load(f)['config']
device = torch.device('cpu')
torch.manual_seed(params['seed'])
if cuda and torch.cuda.device_count():
torch.cuda.manual_seed(params['seed'])
device = torch.device('cuda')
if "min" in path:
_, _, test_tasks = get_mini_imagenet(params['ways'], params['shots'])
else:
_, _, test_tasks = get_omniglot(params['ways'], params['shots'])
if "maml" in path:
run_maml(params, test_tasks, device)
else:
run_anil(params, test_tasks, device)
def run_maml(params, test_tasks, device):
if 'min' == params['dataset']:
print('Loading Mini-ImageNet model')
model = MiniImagenetCNN(params['ways'])
else:
print('Loading Omniglot model')
model = OmniglotCNN(params['ways'])
# Evaluate the model at every checkpoint
if eval_iters:
ckpnt = base_path + "/model_checkpoints/"
model_ckpnt_results = {}
for model_ckpnt in os.scandir(ckpnt):
if model_ckpnt.path.endswith(".pt"):
print(f'Testing {model_ckpnt.path}')
res = evaluate_maml(params, model, test_tasks, device, model_ckpnt.path)
model_ckpnt_results[model_ckpnt.path] = res
with open(base_path + '/ckpnt_results.json', 'w') as fp:
json.dump(model_ckpnt_results, fp, sort_keys=True, indent=4)
final_model = base_path + '/model.pt'
if meta_test:
evaluate_maml(params, model, test_tasks, device, final_model)
# Run a Continual Learning experiment
if cl_exp:
print("Running Continual Learning experiment...")
model.load_state_dict(torch.load(final_model))
model.to(device)
maml = MAML(model, lr=cl_params['inner_lr'], first_order=False)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
run_cl_exp(base_path, maml, loss, test_tasks, device,
params['ways'], params['shots'], cl_params=cl_params)
# Run a Representation change experiment
if rep_exp:
model.load_state_dict(torch.load(final_model))
model.to(device)
maml = MAML(model, lr=rep_params['inner_lr'], first_order=False)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
print("Running Representation experiment...")
run_rep_exp(base_path, maml, loss, test_tasks, device,
params['ways'], params['shots'], rep_params=rep_params)
def run_anil(params, test_tasks, device):
# ANIL
if 'omni' == params['dataset']:
print('Loading Omniglot model')
fc_neurons = 128
features = ConvBase(output_size=64, hidden=32, channels=1, max_pool=False)
else:
print('Loading Mini-ImageNet model')
fc_neurons = 1600
features = ConvBase(output_size=64, channels=3, max_pool=True)
features = torch.nn.Sequential(features, Lambda(lambda x: x.view(-1, fc_neurons)))
head = torch.nn.Linear(fc_neurons, params['ways'])
head = MAML(head, lr=params['inner_lr'])
# Evaluate the model at every checkpoint
if eval_iters:
ckpnt = base_path + "/model_checkpoints/"
model_ckpnt_results = {}
for model_ckpnt in os.scandir(ckpnt):
if model_ckpnt.path.endswith(".pt"):
if "features" in model_ckpnt.path:
features_path = model_ckpnt.path
head_path = str.replace(features_path, "features", "head")
print(f'Testing {model_ckpnt.path}')
res = evaluate_anil(params, features, head, test_tasks, device, features_path, head_path)
model_ckpnt_results[model_ckpnt.path] = res
with open(base_path + '/ckpnt_results.json', 'w') as fp:
json.dump(model_ckpnt_results, fp, sort_keys=True, indent=4)
final_features = base_path + '/features.pt'
final_head = base_path + '/head.pt'
if meta_test:
evaluate_anil(params, features, head, test_tasks, device, final_features, final_head)
if cl_exp:
print("Running Continual Learning experiment...")
features.load_state_dict(torch.load(final_features))
features.to(device)
head.load_state_dict(torch.load(final_head))
head.to(device)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
run_cl_exp(base_path, head, loss, test_tasks, device,
params['ways'], params['shots'], cl_params=cl_params, features=features)
if rep_exp:
features.load_state_dict(torch.load(final_features))
features.to(device)
head.load_state_dict(torch.load(final_head))
head.to(device)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
# Only check head change
rep_params['layers'] = [-1]
print("Running Representation experiment...")
run_rep_exp(base_path, head, loss, test_tasks, device,
params['ways'], params['shots'], rep_params=rep_params, features=features)
def evaluate_maml(params, model, test_tasks, device, path):
model.load_state_dict(torch.load(path))
model.to(device)
maml = MAML(model, lr=params['inner_lr'], first_order=False)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
return evaluate(params, test_tasks, maml, loss, device)
def evaluate_anil(params, features, head, test_tasks, device, features_path, head_path):
features.load_state_dict(torch.load(features_path))
features.to(device)
head.load_state_dict(torch.load(head_path))
head = MAML(head, lr=params['inner_lr'])
head.to(device)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
return evaluate(params, test_tasks, head, loss, device, features=features)
if __name__ == '__main__':
run(base_path)
exit()
# MIN
# ANIL 5w1s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/anil_5w1s_min_10_09_10h08_3_8815"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/anil_5w1s_min_10_09_11h06_2_2906"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/anil_5w1s_min_10_09_11h59_1_1374"
# MAML 5w1s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/maml_5w1s_min_10_09_12h58_3_2722"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/maml_5w1s_min_10_09_15h12_1_9323"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w1s/maml_5w1s_min_10_09_17h09_2_6302"
# ANIL 5w5s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/anil_5w5s_min_11_09_00h36_1_6461"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/anil_5w5s_min_11_09_03h38_2_8655"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/anil_5w5s_min_11_09_05h56_3_6285"
# MAML 5w5s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h53_1_1434"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h54_2_1671"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/mini_imagenet/5w5s/maml_5w5s_min_31_03_12h54_3_2104"
# Omni
# ANIL 20w1s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/anil_20w1s_omni_06_09_11h17_1_4305"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/anil_20w1s_omni_06_09_11h17_2_8126"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/anil_20w1s_omni_06_09_11h17_3_4772"
# MAML 20w1s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/maml_20w1s_omni_31_03_10h18_1_9247"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/maml_20w1s_omni_31_03_10h21_2_302"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w1s/maml_20w1s_omni_31_03_10h22_3_7628"
# ANIL 20w5s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/anil/anil_20w5s_omni_09_09_13h23_2_4977"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/anil/anil_20w5s_omni_09_09_13h24_1_775"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/anil/anil_20w5s_omni_09_09_14h31_3_5663"
# MAML 20w5s
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/maml/maml_20w5s_omni_31_03_10h23_1_6864"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/maml/maml_20w5s_omni_31_03_10h24_2_1576"
base_path = "/home/kosz/Projects/KTH/Thesis/models/vision/omniglot/20w5s/maml/maml_20w5s_omni_31_03_10h24_3_8259"
| StarcoderdataPython |
3309293 | from __future__ import print_function, absolute_import
import os
import subprocess
import sys
import threading
import warnings
import numpy as np
from numba import jit, autojit, SmartArray, cuda, config
from numba.errors import (NumbaDeprecationWarning,
NumbaPendingDeprecationWarning, NumbaWarning)
import numba.unittest_support as unittest
from numba.targets.imputils import iternext_impl
class TestDeprecation(unittest.TestCase):
def test_autojit(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
def dummy():
pass
autojit(dummy)
self.assertEqual(len(w), 1)
def check_warning(self, warnings, expected_str, category):
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0].category, category)
self.assertIn(expected_str, str(warnings[0].message))
self.assertIn("http://numba.pydata.org", str(warnings[0].message))
def test_jitfallback(self):
# tests that @jit falling back to object mode raises a
# NumbaDeprecationWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", category=NumbaWarning)
warnings.simplefilter("always", category=NumbaDeprecationWarning)
def foo():
return [] # empty list cannot be typed
jit(foo)()
msg = ("Fall-back from the nopython compilation path to the object "
"mode compilation path")
self.check_warning(w, msg, NumbaDeprecationWarning)
def test_reflection_of_mutable_container(self):
# tests that reflection in list/set warns
def foo_list(a):
return a.append(1)
def foo_set(a):
return a.add(1)
for f in [foo_list, foo_set]:
container = f.__name__.strip('foo_')
inp = eval(container)([10, ])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", category=NumbaWarning)
warnings.simplefilter("always",
category=NumbaPendingDeprecationWarning)
jit(nopython=True)(f)(inp)
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, NumbaPendingDeprecationWarning)
warn_msg = str(w[0].message)
msg = ("Encountered the use of a type that is scheduled for "
"deprecation")
self.assertIn(msg, warn_msg)
msg = ("\'reflected %s\' found for argument" % container)
self.assertIn(msg, warn_msg)
self.assertIn("http://numba.pydata.org", warn_msg)
def test_smartarray(self):
# tests deprecation of SmartArray
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", category=NumbaWarning)
warnings.simplefilter("always", category=NumbaDeprecationWarning)
SmartArray(np.zeros(1))
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, NumbaDeprecationWarning)
warn_msg = str(w[0].message)
msg = "SmartArray is deprecated"
self.assertIn(msg, warn_msg)
self.assertIn("http://numba.pydata.org", warn_msg)
def test_iternext_impl(self):
# tests deprecation of iternext_impl without a RefType supplied
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=NumbaDeprecationWarning)
@iternext_impl
def foo(ctx, builder, sig, args, res):
pass
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, NumbaDeprecationWarning)
warn_msg = str(w[0].message)
msg = ("The use of iternext_impl without specifying a "
"numba.targets.imputils.RefType is deprecated")
def run_cmd(self, cmdline, env, kill_is_ok=False):
popen = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
shell=True)
# finish in 20s or kill it, there's no work being done
def kill():
popen.stdout.flush()
popen.stderr.flush()
popen.kill()
timeout = threading.Timer(20., kill)
try:
timeout.start()
out, err = popen.communicate()
retcode = popen.returncode
if retcode != 0:
raise AssertionError("process failed with code %s: stderr "
"follows\n%s\nstdout :%s" % (retcode,
err.decode(),
out.decode()))
return out.decode(), err.decode()
finally:
timeout.cancel()
return None, None
@unittest.skipIf(not cuda.is_available() or config.ENABLE_CUDASIM,
"Needs real CUDA stack")
def test_numbapro_vars(self):
# tests deprecation of NUMBAPRO_ environment variables
expected = ("Environment variables with the 'NUMBAPRO' prefix are "
"deprecated, found use of %s=%s")
NUMBAPRO_VARS = [(['NVVM', 'CUDALIB', 'LIBDEVICE'], '/'),
(['VERBOSE_CU_JIT_LOG', ], '1')]
# NUMBAPRO_CUDA_LOG_SIZE is not tested, needs a live module/linker
for varz, val in NUMBAPRO_VARS:
for v in varz:
numbapro_var = 'NUMBAPRO_%s' % v
env_copy = os.environ.copy()
env_copy[numbapro_var] = val
call = "'from numba.cuda.cudadrv.libs import test; test()'"
cmdline = [sys.executable, "-c", call]
out, err = self.run_cmd(' '.join(cmdline), env_copy)
self.assertIn('NumbaDeprecationWarning:', err)
self.assertIn(expected % (numbapro_var, val), err)
self.assertIn("http://numba.pydata.org", err)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
84965 | <reponame>piyushrpt/isce3<filename>python/packages/isce3/core/Ellipsoid.py
#-*- coding: utf-8 -*-
# Import the extension
from .. import isceextension
class Ellipsoid(isceextension.pyEllipsoid):
"""
Wrapper for pyEllipsoid.
"""
pass
| StarcoderdataPython |
1677597 | """
Ray factory
classes that provide vertex and triangle information for rays on spheres
Example:
rays = Rays_Tetra(n_level = 4)
print(rays.vertices)
print(rays.faces)
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from scipy.spatial import ConvexHull
import warnings
class Rays_Base(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
self._vertices, self._faces = self.setup_vertices_faces()
self._vertices = np.asarray(self._vertices, np.float32)
        self._faces = np.asarray(self._faces, int)  # np.int is a deprecated alias for the builtin int
self._faces = np.asanyarray(self._faces)
def setup_vertices_faces(self):
"""has to return
verts , faces
verts = ( (z_1,y_1,x_1), ... )
faces ( (0,1,2), (2,3,4), ... )
"""
raise NotImplementedError()
@property
def vertices(self):
return self._vertices.copy()
@property
def faces(self):
return self._faces.copy()
def __getitem__(self, i):
return self.vertices[i]
def __len__(self):
return len(self._vertices)
def __repr__(self):
def _conv(x):
if isinstance(x,(tuple, list, np.ndarray)):
return "_".join(_conv(_x) for _x in x)
if isinstance(x,float):
return "%.2f"%x
return str(x)
return "%s_%s" % (self.__class__.__name__, "_".join("%s_%s" % (k, _conv(v)) for k, v in sorted(self.kwargs.items())))
def to_json(self):
return {
"name": self.__class__.__name__,
"kwargs": self.kwargs
}
def dist_loss_weights(self, anisotropy = (1,1,1)):
"""returns the anisotropy corrected weights for each ray"""
anisotropy = np.array(anisotropy)
assert anisotropy.shape == (3,)
return np.linalg.norm(self.vertices*anisotropy, axis = -1)
def rays_from_json(d):
return eval(d["name"])(**d["kwargs"])
################################################################
class Rays_Cartesian(Rays_Base):
def __init__(self, n_rays_x=11, n_rays_z=5):
super().__init__(n_rays_x=n_rays_x, n_rays_z=n_rays_z)
def setup_vertices_faces(self):
"""has to return list of ( (z_1,y_1,x_1), ... ) _"""
n_rays_x, n_rays_z = self.kwargs["n_rays_x"], self.kwargs["n_rays_z"]
dphi = np.float32(2. * np.pi / n_rays_x)
dtheta = np.float32(np.pi / n_rays_z)
verts = []
for mz in range(n_rays_z):
for mx in range(n_rays_x):
phi = mx * dphi
theta = mz * dtheta
if mz == 0:
theta = 1e-12
if mz == n_rays_z - 1:
theta = np.pi - 1e-12
dx = np.cos(phi) * np.sin(theta)
dy = np.sin(phi) * np.sin(theta)
dz = np.cos(theta)
if mz == 0 or mz == n_rays_z - 1:
dx += 1e-12
dy += 1e-12
verts.append([dz, dy, dx])
verts = np.array(verts)
def _ind(mz, mx):
return mz * n_rays_x + mx
faces = []
for mz in range(n_rays_z - 1):
for mx in range(n_rays_x):
faces.append([_ind(mz, mx), _ind(mz + 1, (mx + 1) % n_rays_x), _ind(mz, (mx + 1) % n_rays_x)])
faces.append([_ind(mz, mx), _ind(mz + 1, mx), _ind(mz + 1, (mx + 1) % n_rays_x)])
faces = np.array(faces)
return verts, faces
class Rays_SubDivide(Rays_Base):
"""
    Subdivision polyhedra
n_level = 1 -> base polyhedra
n_level = 2 -> 1x subdivision
n_level = 3 -> 2x subdivision
...
"""
def __init__(self, n_level=4):
super().__init__(n_level=n_level)
def base_polyhedron(self):
raise NotImplementedError()
def setup_vertices_faces(self):
n_level = self.kwargs["n_level"]
verts0, faces0 = self.base_polyhedron()
return self._recursive_split(verts0, faces0, n_level)
def _recursive_split(self, verts, faces, n_level):
if n_level <= 1:
return verts, faces
else:
verts, faces = Rays_SubDivide.split(verts, faces)
return self._recursive_split(verts, faces, n_level - 1)
@classmethod
def split(self, verts0, faces0):
"""split a level"""
split_edges = dict()
verts = list(verts0[:])
faces = []
def _add(a, b):
""" returns index of middle point and adds vertex if not already added"""
edge = tuple(sorted((a, b)))
if not edge in split_edges:
v = .5 * (verts[a] + verts[b])
v *= 1. / np.linalg.norm(v)
verts.append(v)
split_edges[edge] = len(verts) - 1
return split_edges[edge]
for v1, v2, v3 in faces0:
ind1 = _add(v1, v2)
ind2 = _add(v2, v3)
ind3 = _add(v3, v1)
faces.append([v1, ind1, ind3])
faces.append([v2, ind2, ind1])
faces.append([v3, ind3, ind2])
faces.append([ind1, ind2, ind3])
return verts, faces
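# Each call to Rays_SubDivide.split() replaces every triangle with four smaller ones,
# adding one new vertex per unique edge (cached in split_edges). For the tetrahedron
# below this turns 4 vertices / 4 faces into 10 vertices / 16 faces, matching the
# vertex counts listed in the class docstrings.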
class Rays_Tetra(Rays_SubDivide):
"""
Subdivision of a tetrahedron
n_level = 1 -> normal tetrahedron (4 vertices)
n_level = 2 -> 1x subdivision (10 vertices)
n_level = 3 -> 2x subdivision (34 vertices)
...
"""
def base_polyhedron(self):
verts = np.array([
[np.sqrt(8. / 9), 0., -1. / 3],
[-np.sqrt(2. / 9), np.sqrt(2. / 3), -1. / 3],
[-np.sqrt(2. / 9), -np.sqrt(2. / 3), -1. / 3],
[0., 0., 1.]
])
faces = [[0, 1, 2],
[0, 3, 1],
[0, 2, 3],
[1, 3, 2]]
return verts, faces
class Rays_Octo(Rays_SubDivide):
"""
    Subdivision of an octahedron
n_level = 1 -> normal Octahedron (6 vertices)
n_level = 2 -> 1x subdivision (18 vertices)
n_level = 3 -> 2x subdivision (66 vertices)
"""
def base_polyhedron(self):
verts = np.array([
[0, 0, 1],
[0, 1, 0],
[0, 0, -1],
[0, -1, 0],
[1, 0, 0],
[-1, 0, 0]])
faces = [[0, 1, 4],
[0, 5, 1],
[1, 2, 4],
[1, 5, 2],
[2, 3, 4],
[2, 5, 3],
[3, 0, 4],
[3, 5, 0],
]
return verts, faces
def reorder_faces(verts, faces):
"""reorder faces such that their orientation points outward"""
def _single(face):
return face[::-1] if np.linalg.det(verts[face])>0 else face
return tuple(map(_single, faces))
class Rays_GoldenSpiral(Rays_Base):
def __init__(self, n=70, anisotropy = None):
if n<4:
raise ValueError("At least 4 points have to be given!")
super().__init__(n=n, anisotropy = anisotropy if anisotropy is None else tuple(anisotropy))
def setup_vertices_faces(self):
n = self.kwargs["n"]
anisotropy = self.kwargs["anisotropy"]
if anisotropy is None:
anisotropy = np.ones(3)
else:
anisotropy = np.array(anisotropy)
# the smaller golden angle = 2pi * 0.3819...
g = (3. - np.sqrt(5.)) * np.pi
phi = g * np.arange(n)
# z = np.linspace(-1, 1, n + 2)[1:-1]
# rho = np.sqrt(1. - z ** 2)
# verts = np.stack([rho*np.cos(phi), rho*np.sin(phi),z]).T
#
z = np.linspace(-1, 1, n)
rho = np.sqrt(1. - z ** 2)
verts = np.stack([z, rho * np.sin(phi), rho * np.cos(phi)]).T
# warnings.warn("ray definition has changed! Old results are invalid!")
# correct for anisotropy
verts = verts/anisotropy
#verts /= np.linalg.norm(verts, axis=-1, keepdims=True)
hull = ConvexHull(verts)
faces = reorder_faces(verts,hull.simplices)
verts /= np.linalg.norm(verts, axis=-1, keepdims=True)
return verts, faces
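# Example (illustrative, not from the original source): 96 rays on a golden spiral,
# corrected for an anisotropic voxel grid; to_json()/rays_from_json() round-trips
# the configuration.
#   rays = Rays_GoldenSpiral(n=96, anisotropy=(2, 1, 1))
#   rays2 = rays_from_json(rays.to_json())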
| StarcoderdataPython |
1680407 | <filename>mfr/extensions/jasp/exceptions.py
from mfr.core.exceptions import RendererError
class JaspRendererError(RendererError):
def __init__(self, message, *args, **kwargs):
super().__init__(message, *args, renderer_class='jasp', **kwargs)
class JaspVersionError(JaspRendererError):
"""The jasp related errors raised from a :class:`mfr.extentions.jasp` and relating to minimum
data archive version should throw or subclass JaspVersionError.
"""
__TYPE = 'jasp_version'
def __init__(self, message, *args, code: int=400, created_by: str='',
actual_version: str='', required_version: str='', **kwargs):
super().__init__(message, *args, code=code, **kwargs)
self.created_by = created_by
self.actual_version = actual_version
self.required_version = required_version
self.attr_stack.append([self.__TYPE, {
'created_by': self.created_by,
'actual_version': self.actual_version,
'required_version': self.required_version,
}])
class JaspFileCorruptError(JaspRendererError):
"""The jasp related errors raised from a :class:`mfr.extentions.jasp` and relating to failure
while consuming JASP files should inherit from JaspFileCorruptError
"""
__TYPE = 'jasp_file_corrupt'
def __init__(self, message, *args, code: int=400, corruption_type: str='',
reason: str='', **kwargs):
super().__init__(message, *args, code=code, **kwargs)
self.corruption_type = corruption_type
self.reason = reason
self.attr_stack.append([self.__TYPE, {
'corruption_type': self.corruption_type,
'reason': self.reason,
}])
| StarcoderdataPython |
1633708 | <filename>tests/tiebreaks/test_buchholz_minus_2.py
import unittest
from swiss_tournament.data.player import Player, BYE
from swiss_tournament.data.result import Result
from swiss_tournament.data.round_pairing import RoundPairing
from swiss_tournament.data.tournament import Tournament
from swiss_tournament.step.tie_breaker import BuchholzMinus2
class BuchholzMinus2TestCase(unittest.TestCase):
unit = BuchholzMinus2()
def test_without_rounds(self):
result = self.unit.get(Player("alice"), Tournament([Player("alice")], []))
self.assertEqual(0, result)
def test_with_one_round(self):
result = self.unit.get(
player=Player("bob"),
tournament=Tournament(
players=[
Player("alice"),
Player("bob")
],
rounds=[
RoundPairing("Round 1", [
Result(Player("alice"), Player("bob"), 1)
])
]
)
)
self.assertEqual(0, result)
def test_with_two_rounds(self):
result = self.unit.get(
player=Player("bob"),
tournament=Tournament(
players=[
Player("alice"),
Player("bob"),
Player("charlie"),
Player("dave")
],
rounds=[
RoundPairing("Round 1", [
Result(Player("alice"), Player("bob"), 1),
Result(Player("charlie"), Player("dave"), 0.5)
]),
RoundPairing("Round 2", [
Result(Player("charlie"), Player("alice"), 1),
Result(Player("dave"), Player("bob"), 1)
])
]
)
)
self.assertEqual(0, result)
def test_with_three_rounds(self):
result = self.unit.get(
player=Player("bob"),
tournament=Tournament(
players=[
Player("alice"),
Player("bob"),
Player("charlie"),
Player("dave")
],
rounds=[
RoundPairing("Round 1", [
Result(Player("alice"), Player("bob"), 1),
Result(Player("charlie"), Player("dave"), 1)
]),
RoundPairing("Round 2", [
Result(Player("charlie"), Player("alice"), 1),
Result(Player("dave"), Player("bob"), 0.5)
]),
RoundPairing("Round 3", [
Result(Player("alice"), Player("dave"), 1),
Result(Player("bob"), Player("charlie"), 0.5)
])
]
)
)
self.assertEqual(2.5, result)
def test_with_bye_on_player(self):
result = self.unit.get(
player=Player("bob"),
tournament=Tournament(
players=[
Player("alice"),
Player("bob"),
Player("charlie"),
Player("dave")
],
rounds=[
RoundPairing("Round 1", [
Result(Player("alice"), Player("bob"), 1),
Result(Player("charlie"), Player("dave"), 0.5)
]),
RoundPairing("Round 2", [
Result(Player("charlie"), Player("alice"), 1),
Result(Player("bob"), BYE, 0.5)
]),
RoundPairing("Round 3", [
Result(Player("alice"), Player("dave"), 1),
Result(Player("bob"), Player("dave"), 0.5)
])
]
)
)
self.assertEqual(2, result)
def test_with_bye_on_opponent(self):
result = self.unit.get(
player=Player("bob"),
tournament=Tournament(
players=[
Player("alice"),
Player("bob"),
Player("charlie"),
Player("dave")
],
rounds=[
RoundPairing("Round 1", [
Result(Player("alice"), Player("bob"), 1),
Result(Player("charlie"), Player("dave"), 0.5)
]),
RoundPairing("Round 2", [
Result(Player("alice"), BYE, 1),
Result(Player("bob"), Player("charlie"), 0.5)
]),
RoundPairing("Round 3", [
Result(Player("alice"), Player("charlie"), 1),
Result(Player("bob"), Player("dave"), 0.5)
])
]
)
)
self.assertEqual(3, result)
| StarcoderdataPython |
3260039 | <filename>python/Exercicios/ex008.py
# CONVERTER FROM METRES TO OTHER UNITS OF MEASUREMENT
mt = float(input('Enter a value (metres):'))
km = mt / 1000
hm = mt / 100
dam = mt / 10
dm = mt * 10
cm = mt * 100
mm = mt * 1000
print(f'\n {mt}M equals {km}km.\n {mt}M equals {hm}hm.\n {mt}M equals {dam}dam.\n {mt}M equals {dm}dm.'
      f'\n {mt}M equals {cm}cm.\n {mt}M equals {mm}mm.') | StarcoderdataPython
3301417 | from z3 import Optimize, Real, If
x = Real('x')
y = Real('y')
z = Real('z')
def z3abs(obj):
    return If(obj > 0, obj, -obj)
optimizer = Optimize()
# optimizer.add(x>0.0)
# optimizer.add(y>0.0)
optimizer.add(x*x+y*y==1.0)
optimizer.add_soft(z == x+y)
optimizer.maximize(z)
result = optimizer.check()
print(optimizer.model())
| StarcoderdataPython |
34600 | # WRITE YOUR SOLUTION HERE:
| StarcoderdataPython |
3333422 | from django import forms
class ContactForm(forms.Form):
    amount = forms.IntegerField(label='Number of copies') | StarcoderdataPython
3245588 | <filename>home/forms.py
from django import forms
from .models import product
class productform(forms.ModelForm):
class Meta:
model = product
fields = [
'id',
'name',
'instagram',
'snapchat',
'others',
]
| StarcoderdataPython |
1682143 | # from .vpype import cli
import vpype
vpype.cli()
| StarcoderdataPython |
57597 | from typing import Any
from dataclasses import dataclass
@dataclass
class ScenarioResult:
scenario: Any = None
steps: Any = None
id: str = None
message: str = None
elapsed: float = 0.0
exception: str = None
threadId: int = None
pid: int = None
startTime: Any = None
endTime: Any = None | StarcoderdataPython |
4818685 | <filename>seamges/patologias/migrations/0003_auto_20191124_1657.py
# Generated by Django 2.2.7 on 2019-11-24 19:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('patologias', '0002_auto_20191124_1630'),
]
operations = [
migrations.AlterModelOptions(
name='patologia',
options={'ordering': ['nombre'], 'verbose_name': 'Patología', 'verbose_name_plural': 'Patologías'},
),
]
| StarcoderdataPython |
4807240 | import math
import collections
class Cell(object):
def __repr__(self):
return 'C{val}'.format(val=self.val)
def __str__(self):
return self.__repr__()
def __init__(self, board, val, x, y):
self.val = val
self.change = False
self.x = x
self.y = y
self.missing_row_nums = board.missing_row_nums[self.y]
self.missing_col_nums = board.missing_col_nums[self.x]
self.missing_square_nums = board.missing_square_nums[board._determine_square_num(self.x, self.y)]
self.valid_nums = None
def get_valid_nums(self):
return self.missing_square_nums.intersection(self.missing_row_nums, self.missing_col_nums)
def has_singleton_solution(self):
self.valid_nums = self.get_valid_nums()
return len(self.valid_nums) == 1
def update_val_with_singleton(self):
if len(self.valid_nums) == 1:
self.val = self.valid_nums.pop()
self._remove_possibility(self.val)
def _remove_possibility(self, only_valid_num):
for category in (self.missing_row_nums, self.missing_col_nums, self.missing_square_nums):
category.remove(only_valid_num)
def _reinsert_possibility(self, valid_num):
for category in (self.missing_row_nums, self.missing_col_nums, self.missing_square_nums):
category.add(valid_num)
class Board(object):
def __init__(self, board):
self.board = board
self.height = len(board)
self.width = len(board[0])
self.missing_row_nums = self._get_missing_row_nums()
self.missing_col_nums = self._get_missing_col_nums()
self.missing_square_nums = self._get_missing_square_nums()
self.missing_cells = self._get_missing_cells()
    def update_value(self, val, x, y):
        self.board[y][x] = val
def update_board(self):
self.missing_row_nums = self._get_missing_row_nums()
self.missing_col_nums = self._get_missing_col_nums()
self.missing_square_nums = self._get_missing_square_nums()
def get_cell_val(self, x, y):
return self.board[y][x]
def _get_missing_row_nums(self):
missing_row_nums = []
for row in self.board:
missing_nums = set(range(1, 10))
for num in row:
if num in missing_nums:
missing_nums.remove(num)
missing_row_nums.append(missing_nums)
return missing_row_nums
def _get_missing_col_nums(self):
missing_col_nums = []
for x in range(self.width):
missing_nums = set(range(1, 10))
for y in range(self.height):
num = self.get_cell_val(x, y)
if num in missing_nums:
missing_nums.remove(num)
missing_col_nums.append(missing_nums)
return missing_col_nums
def _get_missing_square_nums(self):
missing_square_nums = [set(range(1,10)) for _ in range(self.height)]
for y in range(self.height):
for x in range(self.width):
square_idx = self._determine_square_num(x, y)
num = self.get_cell_val(x, y)
if num in missing_square_nums[square_idx]:
missing_square_nums[square_idx].remove(num)
return missing_square_nums
def _determine_square_num(self, x, y):
row_num = math.floor(y / 3)
col_num = math.floor(x / 3)
return row_num * 3 + col_num
def _get_missing_cells(self):
missing_cells = collections.OrderedDict()
for y, row in enumerate(self.board):
for x, num in enumerate(row):
if num == 0:
missing_cells[(x, y)] = Cell(self, num, x, y)
return missing_cells
def solve(self):
print('Unsolved:')
self.print_board()
board_has_changed = True
while board_has_changed:
num_missing_cells_before = len(self.missing_cells)
for _ in range(num_missing_cells_before):
missing_cell_id, missing_cell = self.missing_cells.popitem(last=False)
if missing_cell.has_singleton_solution():
missing_cell.update_val_with_singleton()
self.update_value(missing_cell.val, missing_cell.x, missing_cell.y)
else:
self.missing_cells[missing_cell_id] = missing_cell
if len(self.missing_cells) == num_missing_cells_before:
board_has_changed = False
if len(self.missing_cells) > 0:
self.backtrack_solver()
print('Solved: ')
self.print_board()
    def backtrack_solver(self):
        # All cells filled: the board is solved.
        if len(self.missing_cells) == 0:
            return True
        # Take the next unsolved cell and try each candidate value in turn.
        missing_cell_id, missing_cell = self.missing_cells.popitem(last=False)
        valid_nums = missing_cell.get_valid_nums()
        for valid_num in valid_nums:
            # Tentatively claim this value, then recurse on the remaining cells.
            missing_cell._remove_possibility(valid_num)
            is_valid_path = self.backtrack_solver()
            if is_valid_path:
                missing_cell.val = valid_num
                self.update_value(valid_num, missing_cell_id[0], missing_cell_id[1])
                return is_valid_path
            # Dead end: release the value and try the next candidate.
            missing_cell._reinsert_possibility(valid_num)
        # No candidate worked; put the cell back and signal the caller to backtrack.
        self.missing_cells[missing_cell_id] = missing_cell
        return False
def print_board(self):
for row in self.board:
str_row = [str(num) for num in row]
print(' '.join(str_row))
print('\n')
# Easy board
def generate_board1():
board = [
[8, 0, 5, 0, 4, 0, 0, 9, 0],
[0, 6, 9, 7, 1, 0, 0, 0, 3],
[7, 0, 0, 3, 0, 8, 0, 0, 0],
[0, 0, 6, 0, 0, 0, 2, 4, 0],
[5, 4, 0, 0, 3, 0, 0, 1, 8],
[0, 1, 8, 0, 0, 0, 9, 0, 0],
[0, 0, 0, 8, 0, 4, 0, 0, 9],
[1, 0, 0, 0, 7, 3, 4, 5, 0],
[0, 7, 0, 0, 5, 0, 3, 0, 2]
]
return board
# Hard board
def generate_board2():
board = [
[int(i) for i in '006000134'],
[int(i) for i in '300400009'],
[int(i) for i in '080100000'],
[int(i) for i in '600500010'],
[int(i) for i in '000761000'],
[int(i) for i in '030008007'],
[int(i) for i in '000009080'],
[int(i) for i in '100004002'],
[int(i) for i in '245000600'],
]
return board
if __name__ == '__main__':
print('Board 1:')
board = generate_board1()
sudoku_puzzle = Board(board)
sudoku_puzzle.solve()
print('Board 2:')
board = generate_board2()
sudoku_puzzle = Board(board)
sudoku_puzzle.solve() | StarcoderdataPython |
99997 | <gh_stars>0
from django.conf.urls import url
from django.conf.urls.static import static
from . import views
app_name = "videodownloader"
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^downloadfile/$', views.downloadfile, name='downloadfile'),
]
| StarcoderdataPython |
1687419 | <reponame>zpiman/golemScripts
from subprocess import call
import urllib
import xml.etree.ElementTree as ET
import time, math
## serial is not no buon
#import serialDecoder.py as sd
TELNET = "telnet 192.168.2.241 10001"
device = '/dev/cu.usbserial'
time_delay = 0.1
time_step = 0.001
class DataGetter():
"""Abstract class used for getting the voltage from a device"""
def __init__(self):
pass
def get(self):
pass
class PotentiometerMover():
"""Abstract class used for moving the potentiometer"""
def __init__(self):
pass
def move(self, direction, distance):
pass
class AD_DataGetter(DataGetter):
"""Class using the AD4ETH A2D converter for measuring the voltage"""
def __init__(self, url):
self.url = url
def get(self):
data = urllib.urlopen(self.url).read()
root = ET.fromstring(data)
inputs = root[0]
return float(inputs.attrib['val'])
class Serial_DataGetter(DataGetter):
"""Class using the VA18B voltmeter over serial connection"""
def __init__(self, serialDevice):
        self.serialDevice = serialDevice
def get(self):
return float(self.serialDevice.getValue())
class Relay_PotentiometerMover(PotentiometerMover):
"""Class using the Quido relay for moving the potentiometer head"""
def __init__(self):
pass
def _sendCommand(self, relayId, high):
if relayId > 16 or relayId < 1:
raise ValueError("Value out of bounds")
stateLetter = ""
if high:
stateLetter = "H"
else:
stateLetter = "L"
stringToSend = "echo '*B1OS" + str(relayId) + stateLetter + "' | " + TELNET
call(stringToSend, shell=True)
def move(self, val):
for i in xrange(abs(val)):
if val > 0:
self._sendCommand(2, True)
self._sendCommand(3, True)
else:
self._sendCommand(2, False)
self._sendCommand(3, False)
print val
time.sleep(time_delay/(abs(val)/10.0))
self._sendCommand(1, True)
time.sleep(time_step)
self._sendCommand(1, False)
time.sleep(time_step)
class Potentiometer():
def __init__(self, voltageGetter, potentiometerMover, rang=0.05):
self.vG = voltageGetter
self.pM = potentiometerMover
self.rang = rang
def setValue(self, value):
while True:
offset = self._offset(value)
if abs(offset) < self.rang: # we are in range of the value
time.sleep(1)
print "done"
if abs(self._offset(value)) > self.rang:
print "final:",self._offset(value)
continue
print self._offset(value)
return
move = self._movement(offset)
print "moving", move
self._move(int(move)) # move the head a little
def _offset(self, value):
currentVal = self.getValue()
offset = currentVal - value
return offset
def _movement(self, offset):
if offset < 0:
sign = True
else:
sign = False
move = math.ceil((offset*10))
if sign:
move *= 1
return move
def _move(self, val):
self.pM.move(val)
def getValue(self):
return self.vG.get()
po = Relay_PotentiometerMover()
## Using AD
dataGetter = AD_DataGetter('http://192.168.2.242/data.xml')
va = Potentiometer(dataGetter, po)
print va.getValue()
va.setValue(2)
## Using serial device
#with sd.SerialDevice(device, 1) as s:
# dataGetter = Serial_DataGetter(s)
# va = Potentiometer(dataGetter)
| StarcoderdataPython |
1050 | from __future__ import division
from mmtbx.tls import tools
import math
import time
pdb_str_1 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 3.000 0.000 0.000 1.00 0.00 C
"""
pdb_str_2 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 3.000 0.000 1.00 0.00 C
"""
pdb_str_3 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 0.000 3.000 1.00 0.00 C
"""
pdb_str_4 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 1.000 2.000 3.000 1.00 0.00 C
"""
def exercise_03():
sqrt = math.sqrt
vs = []
vs.append( [(sqrt(2)/2, sqrt(2)/2, 0), (-sqrt(2)/2, sqrt(2)/2, 0), (0,0,1)] )
vs.append( [(1,0,0), (0, sqrt(2)/2, sqrt(2)/2), (0, -sqrt(2)/2, sqrt(2)/2)] )
vs.append( [(sqrt(3)/2, 1/2, 0), (-1/2, sqrt(3)/2, 0), (0,0,1)] )
vs.append( [(1,0,0), (0, sqrt(3)/2, 1/2), (0, -1/2, sqrt(3)/2)] )
for pdb_str in [pdb_str_1, pdb_str_2, pdb_str_3, pdb_str_4]:
for vs_ in vs:
vx,vy,vz = vs_
print vx,vy,vz
tools.u_tls_vs_u_ens(pdb_str=pdb_str,
tx=0.05,ty=0.07,tz=0.09,
vx=vx, vy=vy, vz=vz,
n_models=1000)
if (__name__ == "__main__"):
t0 = time.time()
exercise_03()
print "Time: %6.4f"%(time.time()-t0)
print "OK"
| StarcoderdataPython |
1695068 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PushDataUpdateResponse(Model):
"""A response to a push data update.
:param update_id: The id of the push data update that you can use to track
it down.
:type update_id: str
"""
_attribute_map = {
'update_id': {'key': 'updateId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PushDataUpdateResponse, self).__init__(**kwargs)
self.update_id = kwargs.get('update_id', None)
| StarcoderdataPython |
189183 | <filename>servicios/urls.py<gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('', views.servicios, name="servicios"),
path ('constancia/', views.constancia, name="constancia"),
path('kardex/', views.kardex, name="kardex"),
path('seguro/', views.seguro, name="seguro"),
] | StarcoderdataPython |
86158 | <filename>mindware/optimizers/base_optimizer.py
import abc
import os
import time
import numpy as np
import pickle as pkl
from mindware.utils.constant import MAX_INT
from mindware.utils.logging_utils import get_logger
from mindware.components.evaluators.base_evaluator import _BaseEvaluator
from mindware.components.utils.topk_saver import CombinedTopKModelSaver
class BaseOptimizer(object):
def __init__(self, evaluator: _BaseEvaluator, config_space, name, timestamp, eval_type, output_dir=None, seed=None):
self.evaluator = evaluator
self.config_space = config_space
assert name in ['hpo', 'fe']
self.name = name
self.seed = np.random.random_integers(MAX_INT) if seed is None else seed
self.start_time = time.time()
self.timing_list = list()
self.incumbent = None
self.eval_type = eval_type
self.logger = get_logger(self.__module__ + "." + self.__class__.__name__)
self.init_hpo_iter_num = None
self.early_stopped_flag = False
self.timestamp = timestamp
self.output_dir = output_dir
self.topk_saver = CombinedTopKModelSaver(k=50, model_dir=self.output_dir, identifier=self.timestamp)
@abc.abstractmethod
def run(self):
pass
@abc.abstractmethod
def iterate(self, budget=MAX_INT):
pass
# TODO:Refactor the other optimizers
def update_saver(self, config_list, perf_list):
# Check if all the configs is valid in case of storing None into the config file
all_invalid = True
for i, perf in enumerate(perf_list):
if np.isfinite(perf) and perf != MAX_INT:
all_invalid = False
if not isinstance(config_list[i], dict):
config = config_list[i].get_dictionary().copy()
else:
config = config_list[i].copy()
if self.evaluator.fixed_config is not None:
if not isinstance(self.evaluator.fixed_config, dict):
fixed_config = self.evaluator.fixed_config.get_dictionary().copy()
else:
fixed_config = self.evaluator.fixed_config.copy()
config.update(fixed_config)
classifier_id = config['algorithm']
# -perf: The larger, the better.
save_flag, model_path, delete_flag, model_path_deleted = self.topk_saver.add(config, -perf,
classifier_id)
# By default, the evaluator has already stored the models.
if self.eval_type in ['holdout', 'partial']:
if save_flag:
pass
else:
os.remove(model_path)
self.logger.info("Model deleted from %s" % model_path)
try:
if delete_flag:
os.remove(model_path_deleted)
self.logger.info("Model deleted from %s" % model_path_deleted)
else:
pass
except:
pass
else:
continue
if not all_invalid:
self.topk_saver.save_topk_config()
def get_evaluation_stats(self):
return
def gc(self):
return
| StarcoderdataPython |
153947 | <reponame>object-oriented-human/competitive
n = int(input())
l = list(map(int, input().split()))
print(all([x > 0 for x in l]) and any([str(x)[::-1] == str(x) for x in l])) | StarcoderdataPython |
3315392 | import torch
from torch import Tensor
from torch.nn import Module
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from typing import Optional, Sequence, List, Dict, SupportsFloat
from utils import make_batch_one_hot
class CumulativeStatistic:
def __init__(self):
self.values_sum = 0.0
self.overall_count = 0.0
self.average = 0.0
def update_using_counts(self, value: SupportsFloat, count: SupportsFloat = 1.0):
value = float(value)
count = float(count)
self.values_sum += value
self.overall_count += count
self.average = self.values_sum / self.overall_count
def update_using_averages(self, value: SupportsFloat, count: SupportsFloat = 1.0):
value = float(value) * float(count)
count = float(count)
self.values_sum += value
self.overall_count += count
self.average = self.values_sum / self.overall_count
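# Worked example of the running average (illustrative, not from the original source):
#   stat = CumulativeStatistic()
#   stat.update_using_counts(8, 10)   # 8 correct out of 10 -> average 0.8
#   stat.update_using_counts(5, 10)   # 13 correct out of 20 -> average 0.65
#   update_using_averages(0.5, 10) would instead add 0.5 * 10 = 5.0 to the sum.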
def get_accuracy(model: Module, test_dataset: Dataset, device: Optional[torch.device] = None,
required_top_k: Optional[Sequence[int]] = None, return_detailed_outputs: bool = False,
criterion: Module = None, make_one_hot: bool = False, n_classes: int = -1, **kwargs) -> \
(torch.FloatTensor, Optional[float], Optional[Tensor], Optional[Tensor]):
stats: Dict[int, CumulativeStatistic]
max_top_k: int
all_val_outputs_tmp: List[Tensor] = []
all_val_labels_tmp: List[Tensor] = []
all_val_outputs: Optional[Tensor] = None
all_val_labels: Optional[Tensor] = None
test_loss: Optional[CumulativeStatistic] = None
test_loader: DataLoader
if required_top_k is None:
required_top_k = [1]
max_top_k = max(required_top_k)
stats = {}
for top_k in required_top_k:
stats[top_k] = CumulativeStatistic()
if criterion is not None:
# Enable test loss
test_loss = CumulativeStatistic()
if make_one_hot and n_classes <= 0:
raise ValueError("n_class must be set when using one_hot_vectors")
test_loader = DataLoader(test_dataset, **kwargs)
model.eval()
with torch.no_grad():
patterns: Tensor
labels: Tensor
targets: Tensor
output: Tensor
for patterns, labels in test_loader:
# Clear grad
model.zero_grad()
if return_detailed_outputs:
all_val_labels_tmp.append(labels.detach().cpu())
if make_one_hot:
targets = make_batch_one_hot(labels, n_classes)
else:
targets = labels
# Send data to device
if device is not None:
patterns = patterns.to(device)
targets = targets.to(device)
# Forward
output = model(patterns)
if criterion is not None:
test_loss.update_using_averages(criterion(output, targets).detach().cpu().item(), count=len(labels))
output = output.detach().cpu()
if return_detailed_outputs:
all_val_outputs_tmp.append(output)
# https://gist.github.com/weiaicunzai/2a5ae6eac6712c70bde0630f3e76b77b
# Gets the indexes of max_top_k elements
_, top_k_idx = output.topk(max_top_k, 1)
top_k_idx = top_k_idx.t()
# correct will have values True where index == label
correct = top_k_idx.eq(labels.reshape(1, -1).expand_as(top_k_idx))
for top_k in required_top_k:
correct_k = correct[:top_k].reshape(-1).float().sum(0) # Number of correct patterns for this top_k
stats[top_k].update_using_counts(correct_k, len(labels))
if return_detailed_outputs:
all_val_outputs = torch.cat(all_val_outputs_tmp)
all_val_labels = torch.cat(all_val_labels_tmp)
acc_results = torch.empty(len(required_top_k), dtype=torch.float)
for top_idx, top_k in enumerate(required_top_k):
acc_results[top_idx] = stats[top_k].average
test_loss_result = None
if criterion is not None:
test_loss_result = test_loss.average
return acc_results, test_loss_result, all_val_outputs, all_val_labels
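# Minimal usage sketch (illustrative; model, dataset and batch size are assumptions):
#   top_k_acc, loss, outputs, labels = get_accuracy(
#       model, test_set, device=torch.device('cuda'), required_top_k=[1, 5],
#       return_detailed_outputs=True, criterion=torch.nn.CrossEntropyLoss(),
#       batch_size=128)
#   # top_k_acc[0] is the top-1 accuracy, top_k_acc[1] the top-5 accuracy.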
| StarcoderdataPython |
3204196 | <reponame>2degrees/djeneralize<gh_stars>1-10
from django.db import models
from djeneralize.fields import SpecializedForeignKey
from djeneralize.models import BaseGeneralizationModel
class Shop(models.Model):
name = models.CharField(max_length=30)
producer = SpecializedForeignKey('FruitProducer', related_name='shops')
class FruitProducer(BaseGeneralizationModel):
name = models.CharField(max_length=30)
pen = models.ForeignKey('writing.WritingImplement')
produce = SpecializedForeignKey('fruit.Fruit')
class EcoProducer(FruitProducer):
fertilizer = models.CharField(max_length=30)
class Meta:
specialization = 'eco_producer'
class StandardProducer(FruitProducer):
chemicals = models.CharField(max_length=30)
class Meta:
specialization = 'standard_producer'
| StarcoderdataPython |
156816 | <filename>tau/core/migrations/0002_reset_all_account_webhooks.py
# Generated by Django 3.1.7 on 2021-11-06 12:18
from django.db import migrations
from constance import config
def toggle_reset_webhooks(apps, schema_editor):
config.RESET_ALL_WEBHOOKS = True
class Migration(migrations.Migration):
dependencies = [
('core', '0001_scope_update_for_chat'),
]
operations = [
migrations.RunPython(toggle_reset_webhooks)
]
| StarcoderdataPython |
1703292 | """add new section table
Revision ID: c5cf60c29302
Revises: <KEY>
Create Date: 2016-06-21 13:26:54.041246
"""
# revision identifiers, used by Alembic.
revision = 'c5cf60c29302'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('section',
sa.Column('id', sa.INTEGER()),
sa.Column('name', sa.String(), default=''),
sa.PrimaryKeyConstraint('id'))
    op.add_column('category', sa.Column('section_id', sa.INTEGER()))
    # Constraint name is an assumption; adjust it to the project's naming convention.
    op.create_foreign_key('fk_category_section_id', 'category', 'section',
                          ['section_id'], ['id'])
def downgrade():
op.drop_table('section')
op.drop_column('category', 'section_id')
| StarcoderdataPython |
8423 | <filename>pctest/test_publish.py<gh_stars>0
#!/usr/bin/python3
# pip3 install websockets
import asyncio
import websockets
import json
import datetime
import sys
class test_publish:
idnum = 1
def __init__( self, sym, price, spread ):
self.symbol = sym
self.pidnum = test_publish.idnum
test_publish.idnum += 1
self.sidnum = test_publish.idnum
test_publish.idnum += 1
self.psubid = -1
self.ssubid = -1
self.price = price
self.spread = spread
def gen_subscribe_price(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.sidnum
}
return json.dumps( req )
def gen_subscribe_price_sched(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price_sched',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.pidnum
}
return json.dumps( req )
def gen_update_price(self):
req = {
'jsonrpc': '2.0',
'method': 'update_price',
'params':{
'account': self.account,
'price_type': 'price',
'status': 'trading',
'price': self.price,
'conf': self.spread
},
'id': None
}
self.price += self.spread
return json.dumps( req )
def parse_reply( self, msg, allsub ):
# parse subscription replies
subid = msg['result']['subscription']
allsub[subid] = self
if msg['id'] == self.pidnum:
self.psubid = subid;
else:
self.ssubid = subid
async def parse_notify( self, ws, msg ):
# parse subscription notification messages
subid = msg['params']['subscription']
ts = datetime.datetime.utcnow().isoformat()
if subid == self.ssubid:
# aggregate price update
res = msg['params']['result']
price = res['price']
spread = res['conf']
status = res['status']
print( f'{ts} received aggregate price update symbol=' + self.symbol +
f',price={price}, spread={spread}, status={status}' )
else:
# request to submit price
print( f'{ts} submit price to block-chain symbol=' + self.symbol +
f',price={self.price}, spread={self.spread}, subscription={subid}')
await ws.send( self.gen_update_price() )
async def subscribe( self, acct, ws, allids ):
# submmit initial subscriptions
self.account = acct
allids[self.pidnum] = self
allids[self.sidnum] = self
await ws.send( self.gen_subscribe_price() )
await ws.send( self.gen_subscribe_price_sched() )
# wbsocket event loop
async def poll( uri ):
# connect to pythd
ws = await websockets.connect(uri)
# submit subscriptions to pythd
allids = {}
allsub = {}
allsym = {}
sym1 = test_publish( 'SYMBOL1/USD', 10000, 100 )
sym2 = test_publish( 'SYMBOL2/USD', 2000000, 20000 )
allsym[sym1.symbol] = sym1
allsym[sym2.symbol] = sym2
# lookup accounts by symbol and subscribe
req = { 'jsonrpc': '2.0', 'method': 'get_product_list', 'id': None }
await ws.send( json.dumps( req ) )
msg = json.loads( await ws.recv() )
for prod in msg['result']:
sym = prod['attr_dict']['symbol']
for px in prod['price']:
if sym in allsym and px['price_type'] == 'price':
await allsym[sym].subscribe( px['account'], ws, allids );
# poll for updates from pythd
while True:
msg = json.loads( await ws.recv() )
# print(msg)
if 'error' in msg:
ts = datetime.datetime.utcnow().isoformat()
code = msg['error']['code']
emsg = msg['error']['message']
print( f'{ts} error code: {code} msg: {emsg}' )
sys.exit(1)
elif 'result' in msg:
msgid = msg['id']
if msgid in allids:
allids[msgid].parse_reply( msg, allsub )
else:
subid = msg['params']['subscription']
if subid in allsub:
await allsub[subid].parse_notify( ws, msg )
# connect to pythd, subscribe to and start publishing on two symbols
if __name__ == '__main__':
uri='ws://localhost:8910'
eloop = asyncio.get_event_loop()
try:
eloop.run_until_complete( poll( uri ) )
except ConnectionRefusedError:
print( f'connection refused uri={uri}' )
sys.exit(1)
| StarcoderdataPython |
1735811 | #
# script.py
#
# This file is modified from python-bitcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Scripts
Functionality to build scripts, as well as SignatureHash().
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from test_framework.mininode import CTransaction, CTxOut, hash256
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import copy
import struct
import test_framework.bignum
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_NOP2 = CScriptOp(0xb1)
OP_NOP3 = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_NOP2,
OP_NOP3,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_NOP2 : 'OP_NOP2',
OP_NOP3 : 'OP_NOP3',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_NOP2' : OP_NOP2,
'OP_NOP3' : OP_NOP3,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum(object):
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
            r.append(absvalue & 0xff)  # bytearray.append() takes an int; chr() here would break on Python 3
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
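# Worked examples (illustrative, assuming the bchr/bord helpers defined earlier
# in this module): CScriptNum.encode() emits the minimal signed little-endian
# encoding of the value, preceded by a single push-length byte, e.g.
#   CScriptNum.encode(CScriptNum(0))   -> b''
#   CScriptNum.encode(CScriptNum(1))   -> b'\x01\x01'
#   CScriptNum.encode(CScriptNum(-1))  -> b'\x01\x81'
#   CScriptNum.encode(CScriptNum(128)) -> b'\x02\x80\x00'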
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
    general case would not require creating a lot of little CScriptOp objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, (int, long)):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bignum.bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes (sop_idx).
"""
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
        Returns either a CScriptOp instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % binascii.hexlify(o).decode('utf8')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
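# Usage sketch (illustrative): CScript coerces opcodes, integers and byte
# strings while composing, and iterating yields opcodes and pushdata rather
# than raw bytes, as the class docstring notes. For a pay-to-pubkey-hash template:
#   s = CScript([OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY, OP_CHECKSIG])
#   list(s)                -> [OP_DUP, OP_HASH160, b'\x00' * 20, OP_EQUALVERIFY, OP_CHECKSIG]
#   s.GetSigOpCount(False) -> 1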
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
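# Note: FindAndDelete() only removes occurrences of `sig` that begin on an
# opcode boundary, mirroring the Satoshi client; SignatureHash() below relies
# on it to strip OP_CODESEPARATOR from the script being signed.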
def SignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut())
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
| StarcoderdataPython |
3372880 | <reponame>christus02/citrix-adc-metrics-exporter-serverless<gh_stars>1-10
import json
import copy
IN_JSON = "metrics.json"
OUT_JSON = "out.json"
#Read the input metrics json
with open(IN_JSON) as f:
metrics = json.load(f)
f.close()
UNIT_CONVERSION = [
{"key": "_mbits_rate", "value": "Megabits/Second"},
{"key": "_mbits", "value": "Megabytes"},
{"key": "_mb", "value": "Megabytes"},
{"key": "_rate", "value": "Count/Second"},
{"key": "percent", "value": "Percent"}
]
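# The order of UNIT_CONVERSION matters: the loops below take the first key that
# appears in a metric description, so the more specific suffixes
# ("_mbits_rate") must stay ahead of the generic ones ("_rate").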
COUNTER_TEMPLATE = {
'MetricName': '',
'Unit': 'Count',
'Value': '',
'Timestamp': '',
'Dimensions': [
{'Name': 'Description', 'Value': ''},
{'Name': 'CitrixADC-AutoScale-Group', 'Value': ''},
{'Name': 'CitrixADC-InstanceID', 'Value': ''}
]
}
out_ds = dict()
for feature in metrics.keys():
out_ds[feature] = dict()
out_ds[feature]['counters'] = list()
for cntr in metrics[feature].get('counters', []):
cntr_template = copy.deepcopy(COUNTER_TEMPLATE) # Deep copy is required as we have dict() of dict()
metric_name = cntr[0]
metric_description = cntr[1]
cntr_template['MetricName'] = metric_name
cntr_template['Dimensions'][0]['Value'] = metric_description
#Find the right Unit
for unit in UNIT_CONVERSION:
if unit['key'] in metric_description:
cntr_template['Unit'] = unit['value']
break
out_ds[feature]['counters'].append(cntr_template)
for cntr in metrics[feature].get('gauges', []):
cntr_template = copy.deepcopy(COUNTER_TEMPLATE) # Deep copy is required as we have dict() of dict()
metric_name = cntr[0]
metric_description = cntr[1]
cntr_template['MetricName'] = metric_name
cntr_template['Dimensions'][0]['Value'] = metric_description
#Find the right Unit
for unit in UNIT_CONVERSION:
if unit['key'] in metric_description:
cntr_template['Unit'] = unit['value']
break
out_ds[feature]['counters'].append(cntr_template)
#Write to a JSON File
with open(OUT_JSON, 'w') as f:
json.dump(out_ds, f, indent=4)
f.close()
| StarcoderdataPython |
3329144 | <filename>Lagrange_poly1.py
#! /usr/bin/env python
"""
File: Lagrange_poly1.py
Copyright (c) 2016 <NAME>
License: MIT
Course: PHYS227
Assignment: 5.23
Date: Feb 20, 2016
Email: <EMAIL>
Name: <NAME>
Description: Implements Lagrange's interpolation formula
"""
import numpy as np
def p_L(x, xp, yp):
"""
Returns the polynomial pL(x), known as Lagrange's interpolation formula
"""
summation = 0.0
for k in xrange(len(yp)):
summation += L_k(x, k, xp, yp) * yp[k]
return summation
def L_k(x, k, xp, yp):
"""
Returns the product that is used in calculating Lagrange's interpolation formula
"""
product = 1.0
for i in xrange(len(xp)):
if i == k:
continue
product *= (x - xp[i]) / float(xp[k] - xp[i])
return product
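# Illustrative check: with two sample points the formula reduces to linear
# interpolation, e.g. p_L(0.5, np.array([0.0, 1.0]), np.array([1.0, 3.0]))
# returns 2.0, since L_0(0.5) = L_1(0.5) = 0.5 and 0.5*1 + 0.5*3 = 2.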
def test_L():
xp = np.asarray([2, 3])
yp = np.asarray([])
assert(abs(L_k(-1, 1, xp, yp) - (-3)) < 1e-3)
def test_p_L():
xp = np.linspace(0, np.pi, 5)
yp = np.sin(xp)
for i in xrange(len(yp)):
assert(abs(p_L(xp[i], xp, yp) - yp[i]) < 1e-3), 'Failure'
test_p_L()
assert(abs(p_L(np.pi * 0.375, np.linspace(0, np.pi, 5), np.sin(np.linspace(0, np.pi, 5))) - np.sin(0.375 * np.pi)) < 1e-3)
 | StarcoderdataPython |
3359742 | <reponame>davidcim/wirinj
from unittest import TestCase
from wirinj import Autowiring, Definitions
from wirinj.core import INJECTED
from wirinj.injector import Injector
class Reality(object):
pass
class Thing:
reality: Reality = INJECTED
cfg = INJECTED
not_injected = 'ABC'
def __init__(self, param):
self.param = param
config = {
'cfg': 'DEF',
}
class TestInjector(TestCase):
def test_field_injection(self):
inj = Injector(Definitions(config), Autowiring())
thing = inj.get(Thing, 'my-param')
self.assertIsInstance(thing, Thing)
self.assertEqual(thing.param, 'my-param')
self.assertIsInstance(thing.reality, Reality)
self.assertEqual(thing.not_injected, 'ABC')
self.assertEqual(thing.cfg, 'DEF')
| StarcoderdataPython |
1770520 | <filename>sphinx/source/docs/user_guide/examples/styling_fixed_ticker.py
from bokeh.plotting import figure, output_file, show
output_file("fixed_ticks.html")
p = figure(plot_width=400, plot_height=400)
p.circle([1,2,3,4,5], [2,5,8,2,7], size=10)
p.xaxis.ticker = [2, 3.5, 4]
show(p)
| StarcoderdataPython |
192894 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 14:53:39 2020
@author: nbarl
"""
import csv
def mirrorDouble(fileName) :
    """Append a mirrored copy of each row of the CSV file back onto the file itself, skipping the first row."""
with open(fileName, 'r+', newline='') as file:
dataReader = csv.reader(file, delimiter=';')
newData = []
for row in dataReader :
newLine = createMirror(row)
newData.append(newLine)
dataWriter = csv.writer(file, delimiter=';')
for row in newData[1:] :
dataWriter.writerow(row)
def createMirror(l) :
    """Return a horizontally mirrored copy of one flattened row: the first width*height
    values are stored column by column, so whole columns are swapped, and the trailing
    `width` values are reversed as well."""
newL = []
width = 7
height = 6
for i in range(width) :
for j in range(height) :
newL.append(l[height*(width-i-1) + j])
shift = width*height
for i in range(width) :
newL.append(l[width-i-1+shift])
    return newL
 | StarcoderdataPython |
3397685 | import tests.perf.test_ozone_ar_speed_many as gen
gen.run_test(350)
| StarcoderdataPython |
75701 | from setuptools import find_packages, setup
LONG_DESCRIPTION = (
'Desc.'
)
setup(
name='mhealth',
version='0.0.3',
packages=find_packages(where='src'),
package_dir={'': 'src'},
url='https://github.com/callumstew/pymhealth',
author='<NAME>',
author_email='<EMAIL>',
description='An mHealth processing and feature extraction library',
long_description=LONG_DESCRIPTION,
install_requires=[
'numpy',
'numba',
'scipy',
'hdbscan'
],
classifiers=[
'Programming Language :: Python',
]
)
| StarcoderdataPython |
1616853 | <reponame>ETLopes/Marmileve-kivy<gh_stars>0
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from prettytable import from_db_cursor
from prettytable import PrettyTable
import sqlite3
conn = sqlite3.connect('marmileve.db')
c = conn.cursor()
class TelaInicial(FloatLayout):
def on_press_bt(self):
login = self.ids.login_text.text
senha = self.ids.senha_text.text
c.execute('SELECT * FROM login WHERE login = ? and senha = ?', (login, senha))
query = (c.fetchone())
if query is not None:
self.clear_widgets()
self.add_widget(TelaMenu())
class TelaMenu(FloatLayout):
def add_pedido(self):
self.clear_widgets()
self.add_widget(AddPedido())
def show_estoque(self):
self.clear_widgets()
self.add_widget(ShowEstoque())
def add_clientes(self):
self.clear_widgets()
self.add_widget(AddCliente())
def telainicial(self):
self.clear_widgets()
self.add_widget(TelaInicial())
x = PrettyTable()
class AddPedido(FloatLayout):
global pedido
pedido = []
def telamenu(self):
self.clear_widgets()
self.add_widget(TelaMenu())
x.clear_rows()
global pedido
pedido = []
item = []
        print(str(item) + 'pedido must be empty')
        print(str(pedido) + 'pedido must be empty')
def addpedido(self):
global item
item = []
if x.field_names == []:
x.field_names = ['Cliente', 'Prato', 'Tam', 'Qtd']
item.append(self.ids.cliente.text)
item.append(self.ids.prato.text)
item.append((self.ids.tam.text).upper())
item.append(self.ids.qtd.text)
        print(str(item) + 'item should only have one entry')
x.add_row(item)
pedido.append(item)
self.ids.resumo.text = str(x)
        print(str(pedido) + 'one or more items')
return pedido
def rempedido(self):
if len(pedido) > 0:
x.del_row(len(pedido)-1)
del pedido[len(pedido)-1]
self.ids.resumo.text = str(x)
def concluipedido(self):
global pedido
if len(pedido) > 0:
print(pedido)
for i in pedido:
c.execute('INSERT INTO pedidos(pedido_cliente, pedido_pratoid, pedido_tam, pedido_qtd, pedido_data) VALUES (?,?,?,?,datetime("now"))', (i[0], i[1], i[2], i[3]))
conn.commit()
pedido = []
x.clear_rows()
            self.ids.resumo.text = 'Order added'
class ShowEstoque(FloatLayout):
def telamenu(self):
self.clear_widgets()
self.add_widget(TelaMenu())
def estoquelist(self):
c.execute('SELECT * FROM show_estoque')
t = from_db_cursor(c)
self.ids.label_estoque.text = str(t)
class AddCliente(FloatLayout):
def telamenu(self):
self.clear_widgets()
self.add_widget(TelaMenu())
class MarmileveApp(App):
pass
janela = MarmileveApp()
janela.run()
| StarcoderdataPython |
41534 | <filename>flask_tube/app.py
# -*- coding: utf-8 -*-
import os
import logging
import inspect
from logging.handlers import SMTPHandler, RotatingFileHandler
from werkzeug.utils import import_string, find_modules
from flask import Flask, Blueprint
class App(Flask):
"""Custom Flask Class."""
def __init__(
self,
import_name,
config=None,
packages=None,
extensions=None,
middlewares=None,
errorhandlers=None):
super(App, self).__init__(
import_name,
instance_path=os.getcwd(),
instance_relative_config=True)
self.packages = packages if packages else []
self.exts = extensions if extensions else []
self.middlewares = middlewares if middlewares else []
        self.errorhandlers = errorhandlers if errorhandlers else []
# config
if config:
self.config.from_pyfile(config)
elif os.getenv('FLASK') == 'dev':
self.config.from_pyfile('config/development.conf')
self.logger.info("Config: Development")
elif os.getenv('FLASK') == 'test':
self.config.from_pyfile('config/test.conf')
self.logger.info("Config: Test")
else:
self.config.from_pyfile('config/production.conf')
self.logger.info("Config: Production")
self.configure_extensions(self.exts)
self.configure_middlewares(self.middlewares)
self.configure_errorhandlers(self.errorhandlers)
# register module
self.configure_packages(self.packages)
self.configure_logging()
def configure_extensions(self, extensions):
for extension in extensions.__dict__.values():
if not inspect.isclass(extension) and hasattr(extension, 'init_app'):
extension.init_app(self)
def configure_packages(self, packages):
for package_name in packages:
package_name = '%s.%s' % (self.import_name, package_name)
modules = find_modules(package_name)
for module in modules:
__import__(module)
package = import_string(package_name)
for attr_name in dir(package):
attr = getattr(package, attr_name)
if isinstance(attr, Blueprint):
self.register_blueprint(attr)
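    # Illustrative example of the layout configure_packages() expects: with
    # import_name='myapp' and packages=['api'] (hypothetical names), every
    # module under myapp/api/ is imported for its side effects and any
    # Blueprint attribute exposed by the myapp.api package itself is registered.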
def configure_middlewares(self, middlewares):
for middleware in middlewares:
middleware(self)
def configure_errorhandlers(self, errorhandlers):
for errorhandler in errorhandlers:
errorhandler(self)
def configure_logging(self):
mail_handler = \
SMTPHandler(self.config['MAIL_SERVER'],
self.config['DEFAULT_MAIL_SENDER'],
self.config['ADMINS'],
                        'application error',
(
self.config['MAIL_USERNAME'],
self.config['MAIL_PASSWORD'],
))
mail_handler.setLevel(logging.ERROR)
self.logger.addHandler(mail_handler)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
debug_log = os.path.join(self.root_path,
self.config['DEBUG_LOG'])
debug_file_handler = \
RotatingFileHandler(debug_log,
maxBytes=100000,
backupCount=10)
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(formatter)
self.logger.addHandler(debug_file_handler)
error_log = os.path.join(self.root_path,
self.config['ERROR_LOG'])
error_file_handler = \
RotatingFileHandler(error_log,
maxBytes=100000,
backupCount=10)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
self.logger.addHandler(error_file_handler)
| StarcoderdataPython |
29808 | <reponame>Surfict/osparc-simcore
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=no-member
# pylint:disable=protected-access
# pylint:disable=too-many-arguments
import re
import shutil
import tempfile
import threading
from collections import namedtuple
from pathlib import Path
from typing import Any, Dict, Optional, Type, Union
import pytest
from aiohttp.client import ClientSession
from attr import dataclass
from pydantic.error_wrappers import ValidationError
from simcore_sdk.node_ports_v2 import exceptions, node_config
from simcore_sdk.node_ports_v2.links import DownloadLink, FileLink, PortLink
from simcore_sdk.node_ports_v2.port import Port
from utils_port_v2 import create_valid_port_config
from yarl import URL
##################### HELPERS
def camel_to_snake(name):
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
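# e.g. camel_to_snake("displayOrder") -> "display_order" (used below to map port config keys to attribute names)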
PortParams = namedtuple(
"PortParams",
"port_cfg, exp_value_type, exp_value_converter, exp_value, exp_get_value, new_value, exp_new_value, exp_new_get_value",
)
def this_node_file_name() -> Path:
return Path(tempfile.gettempdir(), "this_node_file.txt")
def another_node_file_name() -> Path:
return Path(tempfile.gettempdir(), "another_node_file.txt")
def download_file_folder_name() -> Path:
return Path(tempfile.gettempdir(), "simcorefiles", f"{threading.get_ident()}")
def project_id() -> str:
return "cd0d8dbb-3263-44dc-921c-49c075ac0dd9"
def node_uuid() -> str:
return "609b7af4-6861-4aa7-a16e-730ea8125190"
def user_id() -> int:
return 666
def simcore_store_id() -> str:
return "0"
def datcore_store_id() -> str:
return "1"
def e_tag() -> str:
return "1212132546546321-1"
##################### FIXTURES
@pytest.fixture
def this_node_file(tmp_path: Path) -> Path:
file_path = this_node_file_name()
file_path.write_text("some dummy data")
assert file_path.exists()
yield file_path
if file_path.exists():
file_path.unlink()
@pytest.fixture
def another_node_file() -> Path:
file_path = another_node_file_name()
file_path.write_text("some dummy data")
assert file_path.exists()
yield file_path
if file_path.exists():
file_path.unlink()
@pytest.fixture
def download_file_folder() -> Path:
destination_path = download_file_folder_name()
destination_path.mkdir(parents=True, exist_ok=True)
yield destination_path
if destination_path.exists():
shutil.rmtree(destination_path)
@pytest.fixture(scope="module", name="project_id")
def project_id_fixture() -> str:
"""NOTE: since pytest does not allow to use fixtures inside parametrizations,
this trick allows to re-use the same function in a fixture with a same "fixture" name"""
return project_id()
@pytest.fixture(scope="module", name="node_uuid")
def node_uuid_fixture() -> str:
"""NOTE: since pytest does not allow to use fixtures inside parametrizations,
this trick allows to re-use the same function in a fixture with a same "fixture" name"""
return node_uuid()
@pytest.fixture(scope="module", name="user_id")
def user_id_fixture() -> int:
"""NOTE: since pytest does not allow to use fixtures inside parametrizations,
this trick allows to re-use the same function in a fixture with a same "fixture" name"""
return user_id()
@pytest.fixture
async def mock_download_file(
monkeypatch,
this_node_file: Path,
project_id: str,
node_uuid: str,
download_file_folder: Path,
):
async def mock_download_file_from_link(
download_link: URL,
local_folder: Path,
session: Optional[ClientSession] = None,
file_name: Optional[str] = None,
) -> Path:
assert str(local_folder).startswith(str(download_file_folder))
destination_path = local_folder / this_node_file.name
destination_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(this_node_file, destination_path)
return destination_path
from simcore_sdk.node_ports_common import filemanager
monkeypatch.setattr(
filemanager, "download_file_from_link", mock_download_file_from_link
)
@pytest.fixture(scope="session", name="e_tag")
def e_tag_fixture() -> str:
return "1212132546546321-1"
@pytest.fixture
async def mock_upload_file(mocker, e_tag):
mock = mocker.patch(
"simcore_sdk.node_ports_common.filemanager.upload_file",
return_value=(simcore_store_id(), e_tag),
)
yield mock
@pytest.fixture
def common_fixtures(
loop,
storage_v0_service_mock,
mock_download_file,
mock_upload_file,
this_node_file: Path,
another_node_file: Path,
download_file_folder: Path,
):
"""this module main fixture"""
node_config.STORAGE_ENDPOINT = "storage:8080"
##################### TESTS
@pytest.mark.parametrize(
"port_cfg, exp_value_type, exp_value_converter, exp_value, exp_get_value, new_value, exp_new_value, exp_new_get_value",
[
pytest.param(
*PortParams(
port_cfg=create_valid_port_config("integer", defaultValue=3),
exp_value_type=(int),
exp_value_converter=int,
exp_value=3,
exp_get_value=3,
new_value=7,
exp_new_value=7,
exp_new_get_value=7,
),
id="integer value with default",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config("number", defaultValue=-23.45),
exp_value_type=(float),
exp_value_converter=float,
exp_value=-23.45,
exp_get_value=-23.45,
new_value=7,
exp_new_value=7.0,
exp_new_get_value=7.0,
),
id="number value with default",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config("boolean", defaultValue=True),
exp_value_type=(bool),
exp_value_converter=bool,
exp_value=True,
exp_get_value=True,
new_value=False,
exp_new_value=False,
exp_new_get_value=False,
),
id="boolean value with default",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"boolean", defaultValue=True, value=False
),
exp_value_type=(bool),
exp_value_converter=bool,
exp_value=False,
exp_get_value=False,
new_value=True,
exp_new_value=True,
exp_new_get_value=True,
),
id="boolean value with default and value",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config("data:*/*", key="no_file"),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=None,
exp_get_value=None,
new_value=str(this_node_file_name()),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "no_file"
/ this_node_file_name().name,
),
id="file type with no payload",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="no_file_with_default",
defaultValue=str(this_node_file_name()),
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=None,
exp_get_value=None,
new_value=this_node_file_name(),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "no_file_with_default"
/ this_node_file_name().name,
),
id="file link with no payload and default value",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="some_file",
value={
"store": simcore_store_id(),
"path": f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
),
exp_get_value=download_file_folder_name()
/ "some_file"
/ this_node_file_name().name,
new_value=None,
exp_new_value=None,
exp_new_get_value=None,
),
id="file link with payload that gets reset",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="some_file_with_file_to_key_map",
fileToKeyMap={
"a_new_fancy_name.csv": "some_file_with_file_to_key_map"
},
value={
"store": simcore_store_id(),
"path": f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
),
exp_get_value=download_file_folder_name()
/ "some_file_with_file_to_key_map"
/ "a_new_fancy_name.csv",
new_value=None,
exp_new_value=None,
exp_new_get_value=None,
),
id="file link with fileToKeyMap with payload that gets reset",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="some_file_on_datcore",
value={
"store": datcore_store_id(),
"path": f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
"dataset": "some blahblah",
"label": "some blahblah",
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=FileLink(
store=datcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
dataset="some blahblah",
label="some blahblah",
),
exp_get_value=download_file_folder_name()
/ "some_file_on_datcore"
/ this_node_file_name().name,
new_value=this_node_file_name(),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "some_file_on_datcore"
/ this_node_file_name().name,
),
id="file link with payload on store 1",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="download_link",
value={
"downloadLink": "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/README.md"
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=DownloadLink(
downloadLink="https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/README.md"
),
exp_get_value=download_file_folder_name()
/ "download_link"
/ this_node_file_name().name,
new_value=this_node_file_name(),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "download_link"
/ this_node_file_name().name,
),
id="download link file type gets set back on store",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="download_link_with_file_to_key",
fileToKeyMap={
"a_cool_file_type.zip": "download_link_with_file_to_key"
},
value={
"downloadLink": "https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/README.md"
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=DownloadLink(
downloadLink="https://raw.githubusercontent.com/ITISFoundation/osparc-simcore/master/README.md"
),
exp_get_value=download_file_folder_name()
/ "download_link_with_file_to_key"
/ "a_cool_file_type.zip",
new_value=this_node_file_name(),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "download_link_with_file_to_key"
/ "a_cool_file_type.zip",
),
id="download link file type with filetokeymap gets set back on store",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="file_port_link",
value={
"nodeUuid": "238e5b86-ed65-44b0-9aa4-f0e23ca8a083",
"output": "the_output_of_that_node",
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=PortLink(
nodeUuid="238e5b86-ed65-44b0-9aa4-f0e23ca8a083",
output="the_output_of_that_node",
),
exp_get_value=download_file_folder_name()
/ "file_port_link"
/ another_node_file_name().name,
new_value=this_node_file_name(),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "file_port_link"
/ this_node_file_name().name,
),
id="file node link type gets set back on store",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"data:*/*",
key="file_port_link_with_file_to_key_map",
fileToKeyMap={
"a_cool_file_type.zip": "file_port_link_with_file_to_key_map"
},
value={
"nodeUuid": "238e5b86-ed65-44b0-9aa4-f0e23ca8a083",
"output": "the_output_of_that_node",
},
),
exp_value_type=(Path, str),
exp_value_converter=Path,
exp_value=PortLink(
nodeUuid="238e5b86-ed65-44b0-9aa4-f0e23ca8a083",
output="the_output_of_that_node",
),
exp_get_value=download_file_folder_name()
/ "file_port_link_with_file_to_key_map"
/ "a_cool_file_type.zip",
new_value=this_node_file_name(),
exp_new_value=FileLink(
store=simcore_store_id(),
path=f"{project_id()}/{node_uuid()}/{this_node_file_name().name}",
e_tag=e_tag(),
),
exp_new_get_value=download_file_folder_name()
/ "file_port_link_with_file_to_key_map"
/ "a_cool_file_type.zip",
),
id="file node link type with file to key map gets set back on store",
),
pytest.param(
*PortParams(
port_cfg=create_valid_port_config(
"number",
key="number_port_link",
value={
"nodeUuid": "238e5b86-ed65-44b0-9aa4-f0e23ca8a083",
"output": "the_output_of_that_node",
},
),
exp_value_type=(float),
exp_value_converter=float,
exp_value=PortLink(
nodeUuid="238e5b86-ed65-44b0-9aa4-f0e23ca8a083",
output="the_output_of_that_node",
),
exp_get_value=562.45,
new_value=None,
exp_new_value=None,
exp_new_get_value=None,
),
id="number node link type gets reset",
),
],
)
async def test_valid_port(
common_fixtures: None,
user_id: int,
project_id: str,
node_uuid: str,
port_cfg: Dict[str, Any],
exp_value_type: Type[Union[int, float, bool, str, Path]],
exp_value_converter: Type[Union[int, float, bool, str, Path]],
exp_value: Union[int, float, bool, str, Path, FileLink, DownloadLink, PortLink],
exp_get_value: Union[int, float, bool, str, Path],
new_value: Union[int, float, bool, str, Path],
exp_new_value: Union[int, float, bool, str, Path, FileLink],
exp_new_get_value: Union[int, float, bool, str, Path],
another_node_file: Path,
):
@dataclass
class FakeNodePorts:
user_id: int
project_id: str
node_uuid: str
@staticmethod
async def get(key):
            # this gets called when a node links to another node; we return the get value, but for files it needs to be a real one
return (
another_node_file
if port_cfg["type"].startswith("data:")
else exp_get_value
)
@classmethod
async def _node_ports_creator_cb(cls, node_uuid: str) -> "FakeNodePorts":
return cls(user_id=user_id, project_id=project_id, node_uuid=node_uuid)
@staticmethod
async def save_to_db_cb(node_ports):
return
fake_node_ports = FakeNodePorts(
user_id=user_id, project_id=project_id, node_uuid=node_uuid
)
port = Port(**port_cfg)
port._node_ports = fake_node_ports
# check schema
for k, v in port_cfg.items():
camel_key = camel_to_snake(k)
if k == "type":
camel_key = "property_type"
if k != "value":
assert v == getattr(port, camel_key)
# check payload
assert port._py_value_type == exp_value_type
assert port._py_value_converter == exp_value_converter
assert port.value == exp_value
if isinstance(exp_get_value, Path):
# if it's a file let's create one there already
exp_get_value.parent.mkdir(parents=True, exist_ok=True)
exp_get_value.touch()
if exp_get_value is None:
assert await port.get() == None
else:
assert await port.get() == exp_get_value
if isinstance(exp_value, PortLink) and isinstance(exp_get_value, Path):
# as the file is moved internally we need to re-create it or it fails
another_node_file_name().touch(exist_ok=True)
# it should work several times
assert await port.get() == exp_get_value
# set a new value
await port.set(new_value)
assert port.value == exp_new_value
if isinstance(exp_new_get_value, Path):
# if it's a file let's create one there already
exp_new_get_value.parent.mkdir(parents=True, exist_ok=True)
exp_new_get_value.touch()
if exp_new_get_value is None:
assert await port.get() == None
else:
assert await port.get() == exp_new_get_value
assert await port.get() == exp_new_get_value
@pytest.mark.parametrize(
"port_cfg",
[
{
"key": "some.key",
"label": "some label",
"description": "some description",
"type": "integer",
"displayOrder": 2.3,
},
{
"key": "some:key",
"label": "",
"description": "",
"type": "integer",
"displayOrder": 2.3,
},
{
"key": "some_key",
"label": "",
"description": "",
"type": "blahblah",
"displayOrder": 2.3,
},
{
"key": "some_file_with_file_in_value",
"label": "",
"description": "",
"type": "data:*/*",
"displayOrder": 2.3,
"value": __file__,
},
],
)
def test_invalid_port(common_fixtures: None, port_cfg: Dict[str, Any]):
with pytest.raises(ValidationError):
Port(**port_cfg)
@pytest.mark.parametrize(
"port_cfg", [(create_valid_port_config("data:*/*", key="set_some_inexisting_file"))]
)
async def test_invalid_file_type_setter(
common_fixtures: None, project_id: str, node_uuid: str, port_cfg: Dict[str, Any]
):
port = Port(**port_cfg)
# set a file that does not exist
with pytest.raises(exceptions.InvalidItemTypeError):
await port.set("some/dummy/file/name")
# set a folder fails too
with pytest.raises(exceptions.InvalidItemTypeError):
await port.set(Path(__file__).parent)
| StarcoderdataPython |
159883 | <reponame>satori99/example-custom-config
"""Platform for sensor integration."""
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import DOMAIN
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
# We only want this platform to be set up via discovery.
if discovery_info is None:
return
add_entities([ExampleSensor()])
class ExampleSensor(Entity):
"""Representation of a sensor."""
def __init__(self):
"""Initialize the sensor."""
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return 'Example Temperature'
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
def update(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
self._state = self.hass.data[DOMAIN]['temperature']
| StarcoderdataPython |
1760782 | <filename>utils.py
# -*- coding: utf-8 -*-
import io
import math
from collections import Counter
from tqdm import tqdm
import tensorflow as tf # TF 2.0
FILE_PATH = './data/'
def create_dataset(path, limit_size=None):
lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
lines = ['<s> ' + line + ' </s>' for line in tqdm(lines[:limit_size])]
#lines = [line for line in tqdm(lines[:limit_size])]
# Print examples
for line in lines[:5]:
print(line)
return lines
def tokenize(text, vocab, max_len):
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token='<unk>')
lang_tokenizer.word_index = vocab
tensor = lang_tokenizer.texts_to_sequences(text)
tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, maxlen=max_len, padding='post')
return tensor, lang_tokenizer
def load_dataset(path, max_len, limit_size=None, lang=['en', 'de']):
dataset_train_input_path = 'train.{}'.format(lang[0])
dataset_train_target_path = 'train.{}'.format(lang[1])
print('Loading...')
vocab_input = load_vocab(path, lang[0])
vocab_target = load_vocab(path, lang[1])
input_text = create_dataset(path + dataset_train_input_path, limit_size)
target_text = create_dataset(path + dataset_train_target_path, limit_size)
print('Tokenizing...')
input_tensor, input_lang_tokenizer = tokenize(input_text, vocab_input, max_len)
target_tensor, target_lang_tokenizer = tokenize(target_text, vocab_target, max_len)
return input_tensor, target_tensor, input_lang_tokenizer, target_lang_tokenizer
def max_length(tensor):
return max(len(t) for t in tensor)
def load_dataset_test(path):
it, tt, ilt, tlt = load_dataset(path, 90, 5000)
print(tt[0].shape)
print(it.shape, tt.shape)
max_it, max_tt = max_length(it), max_length(tt)
print(max_it, max_tt)
def load_vocab(path, lang):
lines = io.open(path + 'vocab.50K.{}'.format(lang), encoding='UTF-8').read().strip().split('\n')
vocab = {}
# 0 is padding
for idx, word in enumerate(lines):
vocab[word] = idx + 1
return vocab
def convert_vocab(tokenizer, vocab):
for key, val in vocab.items():
tokenizer.index_word[val] = key
def select_optimizer(optimizer, learning_rate):
if optimizer == 'adam':
return tf.optimizers.Adam(learning_rate)
elif optimizer == 'sgd':
return tf.optimizers.SGD(learning_rate)
elif optimizer == 'rmsprop':
return tf.optimizers.RMSprop(learning_rate)
def loss_function(loss_object, y_true, y_pred):
mask = tf.math.logical_not(tf.math.equal(y_true, 0))
loss = loss_object(y_true, y_pred)
mask = tf.cast(mask, dtype=loss.dtype)
loss *= mask
return tf.reduce_mean(loss)
def ngrams(text, n):
"""
    Args:
        text - list type, Ex. ['I', 'like', 'a', 'dog', '.']
        n - n-gram size, int type
"""
if type(text) == str:
text = text.split()
grams = (tuple(text[idx:idx + n]) for idx in range(len(text) - n + 1))
return grams
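# Example: list(ngrams(['I', 'like', 'a', 'dog'], 2))
#   -> [('I', 'like'), ('like', 'a'), ('a', 'dog')]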
class BLEU():
"""ref: http://www.nltk.org/_modules/nltk/align/bleu.html
"""
@staticmethod
def compute(candidate, references, weights):
"""
        Args:
            candidate - list type
            references - list of reference word lists
weights - list type
"""
candidate = [word.lower() for word in candidate]
references = [[word.lower() for word in reference] for reference in references]
p_ns = (BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1))
print(p_ns)
s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
bp = BLEU.brevity_penalty(candidate, references)
return bp * math.exp(s)
@staticmethod
def modified_precision(candidate, references, n):
counts = Counter(ngrams(candidate, n))
if len(counts) == 0:
return 0
max_counts = {}
for reference in references:
reference_counts = Counter(ngrams(reference, n))
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
clipped_counts = dict((ngram, min(cnt, max_counts[ngram])) for ngram, cnt in counts.items())
return sum(clipped_counts.values()) / sum(counts.values())
@staticmethod
def brevity_penalty(candidate, references):
c = len(candidate)
        # use the reference length closest to the candidate length
        # (rather than the smallest length difference)
        r = min((abs(len(ref) - c), len(ref)) for ref in references)[1]
if c > r:
return 1
else:
return math.exp(1 - r / c)
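# BLEU.compute() above follows the usual sentence-level formula
#   BLEU = BP * exp(sum_n w_n * log(p_n))
# where p_n are the modified n-gram precisions and the brevity penalty is
#   BP = 1 if c > r else exp(1 - r / c),
# with c the candidate length and r the reference length closest to c.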
def main():
#load_dataset_test(FILE_PATH)
weights = [0.25, 0.25, 0.25, 0.25]
candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
'ensures', 'that', 'the', 'military', 'always',
'obeys', 'the', 'commands', 'of', 'the', 'party']
reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
'ensures', 'that', 'the', 'military', 'will', 'forever',
'heed', 'Party', 'commands']
reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
'guarantees', 'the', 'military', 'forces', 'always',
'being', 'under', 'the', 'command', 'of', 'the',
'Party']
reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
'army', 'always', 'to', 'heed', 'the', 'directions',
'of', 'the', 'party']
print(ngrams(candidate1, 3))
print(BLEU.compute(candidate1, [reference1, reference2, reference3], weights))
pass
if __name__ == '__main__':
main()
| StarcoderdataPython |
153421 | import datetime
import simplejson as json
from django.conf import settings
from django.http import HttpResponse
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.views.generic import FormView
from .app_settings import SLICK_REPORTING_DEFAULT_END_DATE, SLICK_REPORTING_DEFAULT_START_DATE, \
SLICK_REPORTING_DEFAULT_CHARTS_ENGINE
from .form_factory import report_form_factory
from .generator import ReportGenerator
class SlickReportViewBase(FormView):
group_by = None
columns = None
hidden_columns = None
report_title = ''
time_series_pattern = ''
time_series_columns = None
date_field = None
swap_sign = False
report_generator_class = ReportGenerator
report_model = None
base_model = None
limit_records = None
queryset = None
chart_settings = None
crosstab_model = None
crosstab_ids = None
crosstab_columns = None
crosstab_compute_reminder = True
excluded_fields = None
"""
A list of chart settings objects instructing front end on how to plot the data.
"""
template_name = 'slick_reporting/simple_report.html'
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
self.form = self.get_form(form_class)
if self.form.is_valid():
report_data = self.get_report_results()
if request.is_ajax():
return self.ajax_render_to_response(report_data)
return self.render_to_response(self.get_context_data(report_data=report_data))
return self.render_to_response(self.get_context_data())
@classmethod
def get_report_model(cls):
return cls.report_model or cls.queryset.model
def ajax_render_to_response(self, report_data):
return HttpResponse(self.serialize_to_json(report_data),
content_type="application/json")
def serialize_to_json(self, response_data):
""" Returns the JSON string for the compiled data object. """
def date_handler(obj):
if type(obj) is datetime.datetime:
return obj.strftime('%Y-%m-%d %H:%M')
elif hasattr(obj, 'isoformat'):
return obj.isoformat()
elif isinstance(obj, Promise):
return force_text(obj)
indent = None
if settings.DEBUG:
indent = 4
return json.dumps(response_data, indent=indent, use_decimal=True, default=date_handler)
def get_form_class(self):
"""
Automatically instantiate a form based on details provided
:return:
"""
return self.form_class or report_form_factory(self.get_report_model(), crosstab_model=self.crosstab_model,
display_compute_reminder=self.crosstab_compute_reminder, excluded_fields=self.excluded_fields)
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = {
'initial': self.get_initial(),
'prefix': self.get_prefix(),
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
elif self.request.method in ('GET', 'PUT'):
# elif self.request.GET:
kwargs.update({
'data': self.request.GET,
'files': self.request.FILES,
})
return kwargs
def get_report_generator(self, queryset, for_print):
q_filters, kw_filters = self.form.get_filters()
if self.crosstab_model:
self.crosstab_ids = self.form.get_crosstab_ids()
crosstab_compute_reminder = self.form.get_crosstab_compute_reminder() if self.request.GET or self.request.POST \
else self.crosstab_compute_reminder
return self.report_generator_class(self.get_report_model(),
start_date=self.form.cleaned_data['start_date'],
end_date=self.form.cleaned_data['end_date'],
q_filters=q_filters,
kwargs_filters=kw_filters,
date_field=self.date_field,
main_queryset=queryset,
print_flag=for_print,
limit_records=self.limit_records, swap_sign=self.swap_sign,
columns=self.columns,
hidden_columns=self.hidden_columns,
group_by=self.group_by,
time_series_pattern=self.time_series_pattern,
time_series_columns=self.time_series_columns,
crosstab_model=self.crosstab_model,
crosstab_ids=self.crosstab_ids,
crosstab_columns=self.crosstab_columns,
crosstab_compute_reminder=crosstab_compute_reminder,
format_row_func=self.format_row
)
def format_row(self, row_obj):
"""
        A hook to format each row. This method gets called on each row in the results and must return the row object.
:param row_obj: a dict representing a single row in the results
:return: A dict representing a single row in the results
"""
return row_obj
def get_columns_data(self, columns):
"""
Hook to get the columns information to front end
:param columns:
:return:
"""
# columns = report_generator.get_list_display_columns()
data = []
hidden = self.hidden_columns or []
for col in columns:
data.append({
'name': col['name'],
'computation_field': col.get('original_name', ''),
'verbose_name': col['verbose_name'],
'visible': col.get('visible', col['name'] not in hidden),
'type': col.get('type', 'text'),
'is_summable': col.get('is_summable', ''),
})
return data
def get_report_results(self, for_print=False):
"""
        Gets the report's data and its metadata, used by datatables.net and highcharts
:return: JsonResponse
"""
queryset = self.get_queryset()
report_generator = self.get_report_generator(queryset, for_print)
data = report_generator.get_report_data()
data = self.filter_results(data, for_print)
data = {
'report_slug': self.get_report_slug(),
'data': data,
'columns': self.get_columns_data(report_generator.get_list_display_columns()),
'metadata': self.get_metadata(generator=report_generator),
'chart_settings': self.get_chart_settings()
}
return data
def get_metadata(self, generator):
"""
A hook to send data about the report for front end which can later be used in charting
:return:
"""
time_series_columns = generator.get_time_series_parsed_columns()
crosstab_columns = generator.get_crosstab_parsed_columns()
metadata = {
'time_series_pattern': self.time_series_pattern,
'time_series_column_names': [x['name'] for x in time_series_columns],
'time_series_column_verbose_names': [x['verbose_name'] for x in time_series_columns],
'crosstab_model': self.crosstab_model or '',
'crosstab_column_names': [x['name'] for x in crosstab_columns],
'crosstab_column_verbose_names': [x['verbose_name'] for x in crosstab_columns],
}
return metadata
def get_chart_settings(self):
"""
Ensure the sane settings are passed to the front end.
"""
output = []
for i, x in enumerate(self.chart_settings or []):
x['id'] = x.get('id', f"{x['type']}-{i}")
if not x.get('title', False):
x['title'] = self.report_title
x['engine_name'] = x.get('engine_name', SLICK_REPORTING_DEFAULT_CHARTS_ENGINE)
output.append(x)
return output
def get_queryset(self):
return self.queryset or self.report_model.objects
def filter_results(self, data, for_print=False):
"""
Hook to Filter results based on computed data (like eliminate __balance__ = 0, etc)
:param data: List of objects
:param for_print: is print request
:return: filtered data
"""
return data
@classmethod
def get_report_slug(cls):
return cls.__name__.lower()
def get_initial(self):
# todo revise why not actually displaying datetime on screen
return {
'start_date': SLICK_REPORTING_DEFAULT_START_DATE,
'end_date': SLICK_REPORTING_DEFAULT_END_DATE
}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if not (self.request.POST or self.request.GET):
# initialize empty form with initials if the no data is in the get or the post
context['form'] = self.get_form_class()()
return context
class SlickReportView(SlickReportViewBase):
def __init_subclass__(cls) -> None:
date_field = getattr(cls, 'date_field', '')
if not date_field:
raise TypeError(f'`date_field` is not set on {cls}')
# sanity check, raises error if the columns or date fields is not mapped
cls.report_generator_class.check_columns([cls.date_field], False, cls.get_report_model())
cls.report_generator_class.check_columns(cls.columns, cls.group_by, cls.get_report_model())
super().__init_subclass__()
| StarcoderdataPython |
1642853 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
LANGUAGE = "english"
SENTENCES_COUNT = 10
if __name__ == "__main__":
url = "https://en.wikipedia.org/wiki/Automatic_summarization"
html='''
<p>Well, I do not know what type of features you are giving to your neural network. However, in general, I would go with a single neural network. It seems that you have no limitation in resources for training your network and the only problem is resources while you apply your network. </p>
<p>The thing is that probably the two problems have things in common (e.g. both types of plates are rectangular). This means that if you use two networks, each has to solve the same sub-problem (the common part) again. If you use only one network the common part of the problem takes fewer cells/weights to be solved and the remaining weights/cells can be employed for better recognition.</p>
<p>In the end, if I was in your place I would try both of them. I think that is the only way to be really sure what is the best solution. When speaking theoretically it is possible that we do not include some factors.</p>
'''
#parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
#parser=HtmlParser.from_string(html, tokenizer=Tokenizer(LANGUAGE), url=None )
# or for plain text files
from post_rec.Utility.TextPreprocessing import InformationAbstrator
text_extractor=InformationAbstrator(100)
text_extractor.initParagraphFilter(text_extractor.lexrankSummary)
plain_text=" ".join( text_extractor.clipText(html) )
parser = PlaintextParser.from_string(plain_text, Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
for sentence in summarizer(parser.document, SENTENCES_COUNT):
print(sentence)
| StarcoderdataPython |
187738 | <filename>__init__.py
from .core.detectors import CornerNet, LineNet, CornerNet_Squeeze, CornerNet_Saccade, CornerNet_ifp_Saccade
from .core.vis_utils import draw_bboxes
| StarcoderdataPython |
3335269 | # -*- coding: utf-8 -*-
#
# Copyright The Plasma Project.
# See LICENSE.txt for details.
"""
`ISmallMessage` Flex Messaging compatibility tests.
.. versionadded:: 0.1
"""
import unittest
import datetime
import uuid
import pyamf
from plasma.test import util
from plasma.flex.messaging import messages
from plasma.flex.messaging.messages import small
class BaseTestCase(unittest.TestCase):
"""
Tests for :class:`messages.AbstractMessage`
"""
cls = None
flags = [0x80, 0x80, 0x02]
def test_too_many_flags(self):
"""
Test to check that if more than 2 flags are received in __readamf__
an error will be thrown.
"""
if self.cls is None:
return
class Mock:
b = self.flags
def __init__(self):
self.i = iter(self.b)
def readUnsignedByte(self):
return self.i.next()
def readObject(self):
return {}
a = self.cls()
self.assertRaises(pyamf.DecodeError, a.__readamf__, Mock())
def test_not_enough_flags(self):
if self.cls is None:
return
class Mock:
b = [0x80, 0x80]
def __init__(self):
self.i = iter(self.b)
def readUnsignedByte(self):
try:
return self.i.next()
except StopIteration:
return 0x00
def readObject(self):
return {}
a = self.cls()
self.assertRaises(pyamf.DecodeError, a.__readamf__, Mock())
class AsyncMessageExtTestCase(BaseTestCase):
"""
Tests for :class:`small.AsyncMessageExt`
"""
cls = small.AsyncMessageExt
flags = [0x80, 0x00, 0x80, 0x00]
def test_alias(self):
alias = pyamf.get_class_alias(self.cls)
alias.compile()
self.assertTrue(alias.sealed)
self.assertFalse(alias.dynamic)
self.assertEquals(alias.alias, 'DSA')
self.assertTrue(alias.external)
class AcknowledgeMessageExtTestCase(BaseTestCase):
"""
Tests for :class:`small.AcknowledgeMessageExt`
"""
cls = small.AcknowledgeMessageExt
flags = [0x80, 0x00, 0x80, 0x80, 0x00]
def test_alias(self):
alias = pyamf.get_class_alias(self.cls)
alias.compile()
self.assertTrue(alias.sealed)
self.assertFalse(alias.dynamic)
self.assertEquals(alias.alias, 'DSK')
self.assertTrue(alias.external)
class CommandMessageExtTestCase(BaseTestCase):
"""
Tests for :class:`small.CommandMessageExt`
"""
cls = small.CommandMessageExt
flags = [0x80, 0x00, 0x00, 0x80, 0x00]
def test_flags_no_operation(self):
"""
Test to ensure that 0 is written for the command flag when operation
is `None`.
"""
class MockDataOutput:
written = []
def writeUnsignedByte(self, byte):
self.written.append(byte)
def writeObject(self, object):
self.written.append(object)
a = self.cls()
b = MockDataOutput()
a.operation = None
a.__writeamf__(b)
self.assertEquals(b.written, [0, 1, None, 0])
def test_alias(self):
alias = pyamf.get_class_alias(self.cls)
alias.compile()
self.assertTrue(alias.sealed)
self.assertFalse(alias.dynamic)
self.assertEquals(alias.alias, 'DSC')
self.assertTrue(alias.external)
class EncodingTestCase(unittest.TestCase):
"""
Encoding tests for :mod:`small`
"""
def test_AcknowledgeMessage(self):
m = messages.AcknowledgeMessage(correlationId='1234')
bytes = pyamf.encode(m.getSmallMessage(), encoding=pyamf.AMF3).getvalue()
self.assertEquals(bytes, '\n\x07\x07DSK\x00\x01\x06\t1234')
def test_CommandMessage(self):
m = messages.CommandMessage()
bytes = pyamf.encode(m.getSmallMessage(), encoding=pyamf.AMF3).getvalue()
self.assertEquals(bytes, '\n\x07\x07DSC\x00\x01\x01\x01\x04\xce\x10')
m = messages.CommandMessage(operation='foo.bar')
bytes = pyamf.encode(m.getSmallMessage(), encoding=pyamf.AMF3).getvalue()
self.assertEquals(bytes, '\n\x07\x07DSC\x00\x01\x01\x01\x06\x0ffoo.bar')
class SmallMessageTestCase(unittest.TestCase):
"""
Tests for :class:`messages.SmallMessageMixIn`
"""
def setUp(self):
self.decoder = pyamf.get_decoder(pyamf.AMF3)
self.buffer = self.decoder.stream
def test_acknowledge(self):
bytes = ('\n\x07\x07DSK\xa8\x03\n\x0b\x01%DSMessagingVersion\x05?\xf0'
'\x00\x00\x00\x00\x00\x00\tDSId\x06IEE0D161D-C11D-25CB-8DBE-3B77B'
'54B55D9\x01\x05Br3&m\x85\x10\x00\x0c!\xee\r\x16\x1d\xc1(&[\xc9'
'\x80RK\x9bE\xc6\xc4\x0c!\xee\r\x16\x1d\xc1=\x8e\xa3\xe0\x10\xef'
'\xad;\xe5\xc5j\x02\x0c!S\x84\x83\xdb\xa9\xc8\xcaM`\x952f\xdbQ'
'\xc9<')
self.buffer.write(bytes)
self.buffer.seek(0)
msg = self.decoder.readElement()
self.assertTrue(isinstance(msg, small.AcknowledgeMessageExt))
self.assertEquals(msg.body, None)
self.assertEquals(msg.destination, None)
self.assertEquals(msg.timeToLive, None)
self.assertEquals(msg.timestamp,
datetime.datetime(2009, 8, 19, 11, 24, 43, 985000))
self.assertEquals(msg.headers, {
'DSMessagingVersion': 1.0,
'DSId': u'EE0D161D-C11D-25CB-8DBE-3B77B54B55D9'
})
self.assertEquals(msg.clientId,
uuid.UUID('ee0d161d-c128-265b-c980-524b9b45c6c4'))
self.assertEquals(msg.messageId,
uuid.UUID('ee0d161d-c13d-8ea3-e010-efad3be5c56a'))
self.assertEquals(msg.correlationId,
uuid.UUID('538483db-a9c8-ca4d-6095-3266db51c93c'))
self.assertEquals(self.buffer.remaining(), 0)
# now encode the msg to check that encoding is byte for byte the same
buffer = pyamf.encode(msg, encoding=pyamf.AMF3).getvalue()
self.assertEquals(buffer, bytes)
def test_command(self):
bytes = ('\n\x07\x07DSC\x88\x02\n\x0b\x01\tDSId\x06IEE0D161D-C11D-'
'25CB-8DBE-3B77B54B55D9\x01\x0c!\xc0\xdf\xb7|\xd6\xee$1s\x152f'
'\xe11\xa8f\x01\x06\x01\x01\x04\x02')
self.buffer.write(bytes)
self.buffer.seek(0)
msg = self.decoder.readElement()
self.assertTrue(isinstance(msg, small.CommandMessageExt))
self.assertEquals(msg.body, None)
self.assertEquals(msg.destination, None)
self.assertEquals(msg.timeToLive, None)
self.assertEquals(msg.timestamp, None)
self.assertEquals(msg.headers, {
'DSId': u'EE0D161D-C11D-25CB-8DBE-3B77B54B55D9'
})
self.assertEquals(msg.clientId, None)
self.assertEquals(msg.messageId,
uuid.UUID('c0dfb77c-d6ee-2431-7315-3266e131a866'))
self.assertEquals(msg.correlationId, u'')
self.assertEquals(self.buffer.remaining(), 0)
# now encode the msg to check that encoding is byte for byte the same
buffer = pyamf.encode(msg, encoding=pyamf.AMF3).getvalue()
self.assertEquals(buffer, bytes)
def test_getmessage(self):
"""Tests for `getSmallMessage`"""
for cls in ['AbstractMessage', 'ErrorMessage', 'RemotingMessage']:
cls = getattr(messages, cls)
self.assertRaises(NotImplementedError, cls().getSmallMessage)
kwargs = {
'body': {'foo': 'bar'},
'clientId': 'spam',
'destination': 'eggs',
'headers': {'blarg': 'whoop'},
'messageId': 'baz',
'timestamp': 1234,
'timeToLive': 99
}
# test async
a = messages.AsyncMessage(correlationId='yay', **kwargs)
m = a.getSmallMessage()
k = kwargs.copy()
k.update({'correlationId': 'yay'})
self.assertTrue(isinstance(m, small.AsyncMessageExt))
d = util.dict_for_slots(m)
self.assertEquals(d, k)
# test command
a = messages.CommandMessage(operation='yay', **kwargs)
m = a.getSmallMessage()
k = kwargs.copy()
k.update({
'operation': 'yay',
'correlationId': None
})
self.assertTrue(isinstance(m, small.CommandMessageExt))
d = util.dict_for_slots(m)
self.assertEquals(d, k)
# test ack
a = messages.AcknowledgeMessage(**kwargs)
m = a.getSmallMessage()
k = kwargs.copy()
k.update({'correlationId': None})
self.assertTrue(isinstance(m, small.AcknowledgeMessageExt))
d = util.dict_for_slots(m)
self.assertEquals(d, k)
| StarcoderdataPython |
143383 | <filename>uliweb/utils/timeit.py
import time
from contextlib import contextmanager
@contextmanager
def timeit(output):
"""
    Print *output* followed by the time spent inside the with-block.
"""
b = time.time()
yield
print(output, 'time used: %.3fs' % (time.time()-b)) | StarcoderdataPython |
11984 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
convolve_grayscale_padding = __import__(
'2-convolve_grayscale_padding').convolve_grayscale_padding
if __name__ == '__main__':
dataset = np.load('../../supervised_learning/data/MNIST.npz')
images = dataset['X_train']
print(images.shape)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
images_conv = convolve_grayscale_padding(images, kernel, (2, 4))
print(images_conv.shape)
plt.imshow(images[0], cmap='gray')
plt.show()
plt.imshow(images_conv[0], cmap='gray')
plt.show()
| StarcoderdataPython |
1655363 | <gh_stars>1-10
import codecs
import pandas as pd
import numpy as np
import argparse
import jieba
import os
def is_chinese(uchar):
if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
return True
else:
return False
def is_punctuation(uchar):
punctuations = [',', '。', '?', '!', ':']
if uchar in punctuations:
return True
else:
return False
def text_process(text):
temp_text = ''
for _ in text:
if is_chinese(_) or is_punctuation(_):
temp_text += _
text = temp_text
return text
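# Illustrative example (not part of the original script):
#   text_process("abc你好,世界!123") -> "你好,世界!"
# Latin letters and digits are dropped; Chinese characters and the listed
# full-width punctuation marks are kept.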
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='clean data')
    # read command-line parameters
parser.add_argument('-write-train', type=str, default='../data/train_clean.csv', help='write train file name')
parser.add_argument('-write-test', type=str, default='../data/test_clean.csv', help='write test file name')
# parser.add_argument('-test', type=int, default=0, help='clean test file')
# parser.add_argument('-train', type=int, default=1, help='clean train file')
args = parser.parse_args()
train=pd.read_csv("../data/Train_DataSet.csv")
train_label_df=pd.read_csv("../data/Train_DataSet_Label.csv")
test=pd.read_csv("../data/Test_DataSet.csv")
train=train.merge(train_label_df,on='id',how='left')
train['label']=train['label'].fillna(-1)
train=train[train['label']!=-1]
train['label']=train['label'].astype(int)
test['content']=test['content'].fillna('无')
train['content']=train['content'].fillna('无')
test['title']=test['title'].fillna('无')
train['title']=train['title'].fillna('无')
# clean train file
for i in train.index:
# clean title
title = text_process(str(train.loc[i, 'title']))
if title == '':
train.loc[i, 'title'] = '无'
else:
train.loc[i, 'title'] = title
# clean content
content = text_process(str(train.loc[i, 'content']))
if content == '':
train.loc[i, 'content'] = '无'
else:
train.loc[i, 'content'] = content
# write to new csv
train.to_csv(args.write_train, index=False, encoding='utf-8')
print('clean train done')
# clean test file
for i in test.index:
title = text_process(str(test.loc[i, 'title']))
if title == '':
test.loc[i, 'title'] = '无'
else:
test.loc[i, 'title'] = title
content = text_process(str(test.loc[i, 'content']))
if content == '':
test.loc[i, 'content'] = '无'
else:
test.loc[i, 'content'] = content
# write to new csv
test.to_csv(args.write_test, index=False, encoding='utf-8')
print('clean test done')
| StarcoderdataPython |
3231121 | <filename>src/fcrypt.py<gh_stars>0
from . import encryptor, logger
import os
import argparse
from platform import system, release
from pathlib import Path
from time import sleep
__version__ = '1.0.0'
system_os = system()
system_release = release()
current_dir = os.getcwd()
default_key_path = '\\default.key' if system_os == 'Windows' else '/default.key'
key = None
info = f"""
#===================================================#
Crypto - File Encryptor/Decryptor
App written by h4sh
Detected OS : {system_os}
OS Release : {system_release}
#===================================================#
"""
epilog = """
Exit Codes:
* 0 -> Success.
* 1 -> Supplied an invalid path.
* 2 -> Failure to load the key file.
* 3 -> Failure to decrypt a specified file.
* 4 -> Failure to encrypt a specified file.
* 5 -> User stopped the action
"""
def setup_parser():
parser = argparse.ArgumentParser(description="Encrypt/Decrypt specified files using Fernet",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
parser.add_argument('path', metavar='path', type=str, help='File to encrypt/decrypt')
parser.add_argument('-k', '--key-path', default=current_dir + default_key_path, type=str, help='Path to the encryption key')
parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose')
parser.add_argument('-d', '--decrypt', action='store_true', help='Decrypt the file')
parser.add_argument('-y', '--yes', action='store_true', help='By-pass safety check')
parser.add_argument('--version', action='version', version=f'Crypto @ {__version__}', help='Display app version')
return parser.parse_args()
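# Illustrative invocations (assuming the module is run as a script exposing exec()):
#   fcrypt secret.txt                  -> encrypts in place with ./default.key (generated if missing)
#   fcrypt secret.txt -d -k my.key     -> decrypts in place using an existing key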
def exec():
args = setup_parser()
print(info)
decrypt = args.decrypt
override = args.yes
if not Path(args.path).exists():
logger.error(f'Invalid path : {args.path}')
return 1
if not Path(args.key_path).exists():
logger.warn(f'Key not found : {args.key_path} | Generating new key...')
key = encryptor.key_create()
logger.verbose(f'Key generated. Writing to {args.key_path}', args.verbose)
encryptor.key_write(key, args.key_path)
logger.success(f'New key has been written to: {args.key_path}')
else:
logger.verbose(f'Key found! Loading into memory...', args.verbose)
key = encryptor.key_load(args.key_path)
if key != None:
logger.success(f'Key loaded into memory!')
else:
logger.error(f'Unable to load the key. Exiting...')
return 2
if not override and not decrypt:
logger.warn(f'Are you sure you wish to encrypt "{args.path}"?')
u = input('(y/n)> ').lower()
if u not in ('y', 'ye', 'yes'):
logger.info('Task stopped by the user')
return 5
if decrypt:
logger.info(f'Decrypting -> {args.path}...')
try:
encryptor.file_decrypt(key, args.path, args.path)
logger.success(f'Decryption complete!')
except Exception as e:
logger.error(f'Failed to decrypt -> {e}')
return 3
else:
logger.info(f'Encrypting -> {args.path}...')
try:
encryptor.file_encrypt(key, args.path, args.path)
logger.success(f'Encryption complete!')
except Exception as e:
logger.error(f'Failed to encrypt -> {e}')
return 4
return 0
if __name__ == '__main__':
exec() | StarcoderdataPython |
3336901 | <reponame>paulcacheux/ctw
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 18:24:31 2019
@author: Mathurin
"""
from fractions import Fraction
import markov
import graphviz
class Tree:
def __init__(self, m):
self.m = m
self.top = Node(None, 0, None, [], m)
self.nodes = [self.top]
def insert_node(self, value, depth, parent):
        node = Node(value, depth, parent, [], self.m)  # use the tree's alphabet size, not the global m
self.nodes.append(node)
return node
def get_node_of_depth(self, depth):
return [node for node in self.nodes if node.depth == depth]
def debug_print(self):
for node in self.nodes:
print("{}".format(repr(node)))
def compute_prob(self, beta):
for node in self.nodes:
node.compute_pe()
self.top.compute_pw(beta)
class Node:
def __init__(self, value, depth, parent, children, m):
self.m = m
self.value = value
self.depth = depth
self.parent = parent
self.children = children
self.count = [0] * m
self.pe = None
def __repr__(self):
return "Node(depth={}, value={}, context={}, count={}, pe={}, pw={})".format(self.depth, self.value, self.get_context(), self.count, float(self.pe), float(self.pw))
def is_leaf(self):
return len(self.children)==0
def get_context(self):
if self.parent is None:
return []
return [self.value] + self.parent.get_context()
    def compute_pe(self):
        # Krichevsky-Trofimov (KT) estimator for an m-ary alphabet:
        # pe = prod_j prod_{i < count[j]} (i + 1/2) / prod_{i < Ms} (i + m/2),
        # where Ms is the total number of symbols observed in this context.
        Ms = sum(self.count)
        num = 1
        for j in range(self.m):
            for i in range(self.count[j]):
                num *= Fraction(1, 2) + i
        den = 1
        for i in range(Ms):
            den *= Fraction(self.m, 2) + i  # was the global `m`; use the node's own alphabet size
        res = num / den
        self.pe = res
    def compute_pw(self, beta):
        # Context-tree weighting: a leaf's weighted probability is its KT
        # estimate; an internal node mixes its own estimate with the product
        # of its children's weighted probabilities.
        if self.is_leaf():
            self.pw = self.pe
        else:
            p = 1
            for c in self.children:
                c.compute_pw(beta)
                p *= c.pw
            self.pw = beta * self.pe + (1 - beta) * p
def build_tree(tree, data, D):
for i in range(1, D+1):
for j in range(len(data) - i + 1):
context = data[j:j+i]
value = context[0]
rest = context[1:]
depth = len(rest)
after = None
if j+i < len(data):
after = data[j+i]
for node in tree.get_node_of_depth(depth):
if node.get_context() == rest:
value_node = get_node_in_tree(tree, node, depth, value)
if after is not None:
value_node.count[after] += 1
def get_node_in_tree(tree, current_node, depth, value):
for c in current_node.children:
if c.value == value:
return c
c = tree.insert_node(value, depth + 1, current_node)
current_node.children.append(c)
return c
def compute_input_proba(tree):
proba=1
for node in tree.nodes :
        if node.is_leaf():  # the node is a leaf
proba*=node.pe
return proba
m = 2
tree = Tree(m)
#input_bits = markov.gen_markov(10)
input_bits=[0,1,0,1,0,1,0,1,0,1,0,1,0,1]
print(input_bits)
build_tree(tree, input_bits, 2)
tree.compute_prob(Fraction(1, 2))
#tree.debug_print()
print(graphviz.main_node_to_graphviz(tree.top))
print(float(compute_input_proba(tree))*100,'%')
| StarcoderdataPython |
14431 | from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.shortcuts import redirect
from .models import StockEntry, StockEntryLine
from .forms import StockEntryForm, StockEntryLineForm, StockEntryLineIF
from main.views import BaseView
class StockEntryList(BaseView, ListView):
model = StockEntry
template_name = 'stock/list.html'
paginate_by = 8
permission_required = 'stockentry.view_stockentry'
class StockEntryDetail(BaseView, DetailView):
model = StockEntry
form_class = StockEntryForm
template_name = 'stock/detail.html'
fields = "__all__"
pk_url_kwarg = 'pk'
permission_required = 'stockentry.view_stockentry'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
lines = StockEntryLine.objects.all().filter(parent=self.kwargs['pk'])
new_line = StockEntryLineForm(initial={'parent':self.object})
context['new_line'] = new_line
context['lines'] = lines
return context
class StockEntryCreate(BaseView, CreateView):
model = StockEntry
form_class = StockEntryForm
template_name = 'stock/create.html'
permission_required = 'stockentry.add_stockentry'
def get_success_url(self):
return reverse('stock:detail', kwargs={'pk':self.object.id})
class StockEntryUpdate(BaseView, UpdateView):
model = StockEntry
form_class = StockEntryForm
formset_class = StockEntryLineIF
template_name = 'stock/detail.html'
pk_url_kwarg = 'pk'
success_url = reverse_lazy('stock:detail')
permission_required = 'stockentry.change_stockentry'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# lines = StockEntryLine.objects.all().filter(parent=self.kwargs['pk'])
# new_line = StockEntryLineForm(initial={'parent':self.object})
# context['new_line'] = new_line
# context['lines'] = lines
# return context
# def get_success_url(self):
# pk = self.kwargs['pk']
# return reverse('stock:detail', kwargs={'pk':pk})
def post(self, request, *args, **kwargs):
obj = self.get_object()
if kwargs.get('process') == 'submit':
obj.submit_stock_entry(obj.id)
if kwargs.get('process') == 'cancel':
obj.cancel_stock_entry(obj.id)
return redirect('stock:detail', pk=obj.id)
class StockEntryLineCreate(BaseView, CreateView):
model = StockEntryLine
form_class = StockEntryLineForm
template_name = 'stock/add_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.add_stockentryline'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['parent'] = self.kwargs['pk']
# return context
def get_success_url(self):
# pk = self.kwargs['pk']
# parent = StockEntry.objects.get(pk=self.kwargs['pk'])
parent_id = self.request.POST['parent']
return reverse('stock:detail', kwargs={'pk':parent_id})
class StockEntryLineEdit(BaseView, UpdateView):
model = StockEntryLine
form_class = StockEntryLineForm
template_name = 'stock/edit_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.change_stockentryline'
def get_success_url(self):
line = StockEntryLine.objects.get(pk=self.kwargs['pk'])
return reverse('stock:detail', kwargs={'pk':line.parent.id})
class StockEntryLineDelete(BaseView, DeleteView):
model = StockEntryLine
template_name = 'stock/delete_line.html'
pk_url_kwarg = 'pk'
permission_required = 'stockentryline.delete_stockentryline'
def get_success_url(self):
return reverse('stock:detail', kwargs={'pk':self.object.parent.id})
| StarcoderdataPython |
4832817 | <filename>schema.py<gh_stars>0
from loopDB import LoopDB # Importing the LoopDB module
loopDB = LoopDB(app.config["DATABASE_URL"], clean=True)
loopDB.initFromFile('schema.json') # Initialising from the schema file | StarcoderdataPython |
146077 | import re
import pandas as pd
import numpy as np
COLUMN_NAMES = [
'Material family',
'Youngs modulus',
'Specific stiffness',
'Yield strength',
'Tensile strength',
'Specific strength',
'Elongation',
'Compressive strength',
'Flexural modulus',
'Flexural strength',
'Shear modulus',
'Bulk modulus',
'Poisson ratio',
'Shape factor',
'Hardness vickers',
'Elastic stored energy',
'Fatigue strength',
'Fracture toughness',
'Toughness',
'Ductility index',
'Melting point',
'Max service temp',
'Min service temp',
'Thermal conductivity',
'Specific heat capacity',
'Thermal expansion coefficient',
'Thermal shock resistance',
'Thermal distortion resistance',
'Latent heat of fusion',
'Electrical resistivity',
'Electrical conductivity',
'Galvanic potential',
'Mechanical loss coefficient',
]
BAD_PROPERTIES = [
    'Elongation',  # This is because there are over 600 bad values for it in the dataset
'Hardness vickers',
# not worth saving
'Specific stiffness',
'Specific strength',
'Shape factor',
'Elastic stored energy',
'Toughness',
'Ductility index',
'Thermal shock resistance',
'Thermal distortion resistance',
# These properties are not actually real properties of the material
'Max service temp',
'Min service temp',
'Electrical resistivity',
# This really messes with our data selection
'Galvanic potential',
]
# These are materials with invalid values for one or more properties
# This entry is here because there's materials with higher, but valid values
# And I don't want to lose them
BAD_MATERIALS = [
"PEEK/IM carbon fiber, UD prepreg, UD lay-up"
]
def parsing_material_data(material_text, new_file):
with open(material_text, "r") as stuff_to_write:
with open(new_file, "w") as stuff_written:
in_thermal_properties = False
in_electrical_properties = False
in_mechanical_properties = False
in_impact_properties = False
for line in stuff_to_write:
if line.startswith("done"):
stuff_written.write(line)
elif "Mechanical properties" in line:
in_mechanical_properties = True
elif "Impact & fracture properties" in line:
in_mechanical_properties = False
in_impact_properties = True
elif "Thermal properties" in line:
in_thermal_properties = True
in_impact_properties = False
elif "Electrical properties" in line:
in_thermal_properties = False
in_electrical_properties = True
elif "Magnetic properties" in line:
in_electrical_properties = False
in_mechanical_properties = False
in_thermal_properties = False
in_impact_properties = False
elif "Mechanical loss coefficient" in line:
stuff_written.write(line)
elif line.startswith("Material family"):
stuff_written.write(line)
elif in_thermal_properties is True or in_electrical_properties is True or in_mechanical_properties is True or in_impact_properties is True:
stuff_written.write(line)
def parsing_refined_data(new_file):
material_name = []
material_family = []
young_modulus_values = []
specific_stiffness_values = []
yield_strength_values = []
tensile_strength_values = []
specific_strength_values = []
elongation_values = []
compressive_strength_values = []
flexural_modulus_values = []
flexural_strength_values = []
shear_modulus_values = []
bulk_modulus_values = []
poisson_ratio_values = []
shape_factor_values = []
hardness_vickers_values = []
elastic_stored_energy_values = []
fatigue_strength_values = []
fracture_toughness_values = []
toughness_values = []
ductility_index_values = []
melting_values = []
max_service_temp_values = []
min_service_temp_values = []
therm_cond_values = []
spec_heat_cap_values = []
therm_expan_coeff_values = []
therm_shock_resist_values = []
therm_dist_resist_values = []
latent_heat_fusion_values = []
elec_resist_values = []
elec_cond_values = []
galvanic_potential_values = []
mech_loss_coeff_values = []
with open(new_file, "r") as sample_info:
for line in sample_info:
if line.startswith("done"):
for line_item in range(2, len(line.split(' '))):
material_name.append((line.split(' ')[line_item].strip('" ,\t\n')))
elif line.startswith("Material family"):
for line_item in range(1, len(line.split(' ')) - 1):
material_family.append(line.split(' ')[line_item].strip(' ",\t\n'))
elif line.startswith("Young's modulus (10^6 psi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
young_modulus_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
young_modulus_values.append("Null")
else:
young_modulus_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Specific stiffness (lbf.ft/lb)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
specific_stiffness_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
specific_stiffness_values.append("Null")
else:
specific_stiffness_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Yield strength (elastic limit) (ksi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
yield_strength_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
yield_strength_values.append("Null")
else:
yield_strength_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Tensile strength (ksi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
tensile_strength_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
tensile_strength_values.append("Null")
else:
tensile_strength_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Specific strength (lbf.ft/lb)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
specific_strength_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
specific_strength_values.append("Null")
else:
specific_strength_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Elongation (% strain)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
elongation_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
elongation_values.append("Null")
else:
elongation_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Compressive strength (ksi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
compressive_strength_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
compressive_strength_values.append("Null")
else:
compressive_strength_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Flexural modulus (10^6 psi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
flexural_modulus_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
flexural_modulus_values.append("Null")
else:
flexural_modulus_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Flexural strength (modulus of rupture) (ksi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
flexural_strength_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
flexural_strength_values.append("Null")
else:
flexural_strength_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Shear modulus (10^6 psi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
shear_modulus_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
shear_modulus_values.append("Null")
else:
shear_modulus_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Bulk modulus (10^6 psi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
bulk_modulus_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
bulk_modulus_values.append("Null")
else:
bulk_modulus_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Poisson's ratio"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
poisson_ratio_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
poisson_ratio_values.append("Null")
else:
poisson_ratio_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Shape factor"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
shape_factor_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
shape_factor_values.append("Null")
else:
shape_factor_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Hardness - Vickers (HV)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
hardness_vickers_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
hardness_vickers_values.append("Null")
else:
hardness_vickers_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Elastic stored energy (springs) (ft.lbf/in^3)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
elastic_stored_energy_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
elastic_stored_energy_values.append("Null")
else:
elastic_stored_energy_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Fatigue strength at 10^7 cycles (ksi)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
fatigue_strength_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
fatigue_strength_values.append("Null")
else:
fatigue_strength_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Fracture toughness (ksi.in^0.5)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
fracture_toughness_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
fracture_toughness_values.append("Null")
else:
fracture_toughness_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Toughness (G) (ft.lbf/in^2)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
toughness_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
toughness_values.append("Null")
else:
toughness_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Ductility index (mil)"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
ductility_index_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
ductility_index_values.append("Null")
else:
ductility_index_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Melting"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
melting_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
melting_values.append("Null")
else:
melting_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Maximum service temperature"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
max_service_temp_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
max_service_temp_values.append("Null")
else:
max_service_temp_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Minimum service temperature"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
min_service_temp_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
min_service_temp_values.append("Null")
else:
min_service_temp_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Thermal conductivity"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
therm_cond_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
therm_cond_values.append("Null")
else:
therm_cond_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Specific heat capacity"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
spec_heat_cap_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
spec_heat_cap_values.append("Null")
else:
spec_heat_cap_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Thermal expansion coefficient"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
therm_expan_coeff_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
therm_expan_coeff_values.append("Null")
else:
therm_expan_coeff_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Thermal shock resistance"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
therm_shock_resist_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
therm_shock_resist_values.append("Null")
else:
therm_shock_resist_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Thermal distortion resistance"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
therm_dist_resist_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
therm_dist_resist_values.append("Null")
else:
therm_dist_resist_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Latent heat of fusion"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
latent_heat_fusion_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
latent_heat_fusion_values.append("Null")
else:
latent_heat_fusion_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Electrical resistivity"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
elec_resist_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
elec_resist_values.append("Null")
else:
elec_resist_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Electrical conductivity"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
elec_cond_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
elec_cond_values.append("Null")
else:
elec_cond_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Galvanic potential"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
galvanic_potential_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
galvanic_potential_values.append("Null")
else:
galvanic_potential_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
elif line.startswith("Mechanical loss coefficient"):
for line_item in range(1, (len(line.split(",")) - 1)):
if " - " in line.split(",")[line_item].strip(" "):
left_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[0]
right_of_dash = line.split(",")[line_item].strip(" ").split(" - ")[1]
average_of_dash = (float(left_of_dash) + float(right_of_dash)) / 2
mech_loss_coeff_values.append(round(average_of_dash, 6))
elif line.split(",")[line_item] is '':
mech_loss_coeff_values.append("Null")
else:
mech_loss_coeff_values.append(round(float(line.split(",")[line_item].strip(" ")), 6))
    # Brinell Hardness, Dielectric Constant, Dielectric Strength and Dissipation Factor are excluded because they lack too much data (>10 rows of materials missing)
null_extender = ["Null"] * 18
material_name.extend(null_extender)
material_family.extend(null_extender)
return(material_name, material_family, young_modulus_values, specific_stiffness_values, yield_strength_values,
tensile_strength_values, specific_strength_values, elongation_values, compressive_strength_values,
flexural_modulus_values, flexural_strength_values, shear_modulus_values, bulk_modulus_values,
poisson_ratio_values, shape_factor_values, hardness_vickers_values,
elastic_stored_energy_values, fatigue_strength_values, fracture_toughness_values, toughness_values,
ductility_index_values, melting_values, max_service_temp_values, min_service_temp_values,
therm_cond_values, spec_heat_cap_values, therm_expan_coeff_values, therm_shock_resist_values, therm_dist_resist_values,
latent_heat_fusion_values, elec_resist_values, elec_cond_values, galvanic_potential_values, mech_loss_coeff_values)
def null_invalid_properties(frame: pd.DataFrame):
# This drops elements in our dataset which have invalid values for any of the following properties
    # found to contain a salvageable amount of invalid data
COLUMNS_TO_SCRUB = [
"Tensile strength",
"Compressive strength",
"Flexural modulus",
"Flexural strength",
"Bulk modulus",
"Elastic stored energy",
"Fatigue strength",
"Fracture toughness",
"Toughness",
"Thermal expansion coefficient",
]
for c in COLUMNS_TO_SCRUB:
# They're all some kind of number between 40k and 50k
# with bias towards repeating values
frame[c].mask(frame[c].gt(40000), inplace=True)
    # Be careful with this column because there are larger entries with valid values
frame["Thermal shock resistance"].mask(frame["Thermal shock resistance"].eq(47650), inplace=True)
frame["Fracture toughness"].mask(frame["Fracture toughness"].gt(12000), inplace=True)
def properties_dataframe_from_file(path: str):
property_lists = parsing_refined_data(path)
frame = pd.DataFrame(index=property_lists[0], data={
k: v for (k, v) in zip(COLUMN_NAMES, property_lists[1:])
})
frame.replace("Null", np.nan, inplace=True)
null_invalid_properties(frame)
return frame
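# Illustrative usage (hypothetical path, not part of the original script):
#   df = properties_dataframe_from_file("new_TEM.csv")
#   df["Youngs modulus"].describe()
# The frame is indexed by material name with one column per entry in
# COLUMN_NAMES, and invalid readings replaced by NaN.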
if __name__ == "__main__":
parsing_material_data("C:/Users/Everet/Documents/AMP_Project/Pres_3/Updated_PCM.csv", "C:/Users/Everet/Documents/AMP_Project/Pres_3/new_TEM.csv")
material_name, material_family, young_modulus_values, specific_stiffness_values, yield_strength_values, tensile_strength_values, specific_strength_values, elongation_values, compressive_strength_values, flexural_modulus_values, flexural_strength_values, shear_modulus_values, bulk_modulus_values, poisson_ratio_values, shape_factor_values, hardness_vickers_values, elastic_stored_energy_values, fatigue_strength_values, fracture_toughness_values, toughness_values, ductility_index_values, melting_values, max_service_temp_values, min_service_temp_values, therm_cond_values, spec_heat_cap_values, therm_expan_coeff_values, therm_shock_resist_values, therm_dist_resist_values, latent_heat_fusion_values, elec_resist_values, elec_cond_values, galvanic_potential_values, mech_loss_coeff_values = parsing_refined_data("C:/Users/Everet/Documents/AMP_Project/Pres_3/new_TEM.csv")
print(len(material_name), len(material_family), len(young_modulus_values), len(specific_stiffness_values), len(yield_strength_values), len(tensile_strength_values))
print(material_name[0])
print(material_family[0])
print(material_family[-4])
print(set(material_family))
#print(len(specific_strength_values), len(elongation_values), len(compressive_strength_values), len(flexural_modulus_values), len(flexural_strength_values))
#print(len(shear_modulus_values), len(bulk_modulus_values), len(poisson_ratio_values), len(shape_factor_values), len(hardness_vickers_values))
#print(len(elastic_stored_energy_values), len(fatigue_strength_values), len(fracture_toughness_values), len(toughness_values), len(ductility_index_values))
#print(len(melting_values), len(max_service_temp_values), len(min_service_temp_values), len(therm_cond_values), len(spec_heat_cap_values))
#print(len(therm_expan_coeff_values), len(therm_shock_resist_values), len(therm_dist_resist_values), len(latent_heat_fusion_values), len(elec_resist_values))
#print(len(elec_cond_values), len(galvanic_potential_values), len(mech_loss_coeff_values))
#33 properties total
| StarcoderdataPython |
197755 | <reponame>v22arvind/Plot-Pings-in-Python
import datetime
import os
import re
import sys
import time
from optparse import OptionParser
import numpy as np
# software version
__version__ = "1.1.0"
ping_flag = "n"
if sys.platform != "win32":
ping_flag = "c"
# ping
def pinger(host, n):
"""Executes the PCs ping command"""
proc = os.popen(f"ping -{ping_flag} {n} {host}")
return "".join(proc.readlines())
# wrapper for ping
def call_pinger(host, n):
"""Calls the pinger function and returns results as arrays"""
out = pinger(host, n)
try:
if sys.platform == "win32":
            loss_idx = int(re.search(r"\d+(?=% loss)", out).group(0))
            ping_idx = int(re.search(r"(?<=Average =) \d+", out).group(0))
else:
# the next two lines assume this format:
# 4 packets transmitted, 4 received, 0% packet loss, time 3002ms
# rtt min/avg/max/mdev = 24.146/63.155/128.436/42.823 ms
            loss_idx = float(re.search(r"\d+(?=% packet loss)", out).group(0))
ping_idx = float(out.split("/")[-3])
    except Exception:
ping_idx = np.nan # bad connection
loss_idx = 100.0
# append data
# ping = np.append(ping, ping_idx)
# loss = np.append(loss, loss_idx)
# t = np.append(t, time.time())
return ping_idx, loss_idx, out
# writes out to the log file
# def write_log(log_file, log_body):
# """Writes results to a log file"""
# log_file.write(f"TIME: {datetime.datetime.now().ctime()}\n{log_body}\n\n")
# produces ping vs time plot
def plot_gen(ping, now, nans, host, interactive=False, size="1280x640"):
"""Generates ping vs time plot"""
if not interactive:
import matplotlib
matplotlib.use("Agg") # no need to load gui toolkit, can run headless
import matplotlib.pyplot as plt
size = [int(dim) for dim in size.split("x")]
datestr = now[0].ctime().split()
datestr = datestr[0] + " " + datestr[1] + " " + datestr[2] + " " + datestr[-1]
plt.figure(figsize=(size[0] / 80.0, size[1] / 80.0)) # dpi is 80
plt.plot(now[~nans], ping[~nans], drawstyle="steps")
plt.title(f"Ping Results for {host}")
plt.ylabel("Latency [ms]")
plt.xlabel(f"Time, {datestr} [GMT-{time.timezone // 3600}]")
plt.xticks(size=10)
plt.yticks(size=10)
plt.ylim(ping[~nans].min() - 5, ping[~nans].max() + 5)
# plot packet losses
start = []
finish = []
for i in range(len(nans)):
if nans[i]:
if i == 0 or nans[i] != nans[i - 1]:
start.append(i)
if i == len(nans) - 1 or nans[i + 1] != nans[i]:
finish.append(i)
# add the red bars for bad pings
for i in range(len(start)):
plt.axvspan(now[start[i]], now[finish[i]], color="red")
return plt
# main
def main(argv=None):
# for interactive mode
if not argv:
argv = sys.argv[1:]
# handle cmd line arguments
parser = OptionParser()
parser.add_option(
"-p",
"--plot",
dest="plot",
action="store_true",
help="generates plot after data collection is finished",
)
parser.add_option(
"-f",
"--file",
dest="fsave",
action="store_true",
help="save plot to file in the current directory",
)
parser.add_option(
"-H",
"--host",
dest="host",
default="google.com",
help="the url or ip address to ping [default: �fault]",
)
parser.add_option(
"-n",
"--num",
dest="n",
default=1,
type="int",
help="the number of packets to send on each ping iteration [default: �fault]",
)
parser.add_option(
"-t",
"--dt",
dest="dt",
default=0.5,
type="float",
help="the time interval (seconds) in which successive pings are sent [default: �fault s]",
)
parser.add_option(
"-l",
"--log",
dest="log",
action="store_true",
help="save a log file of the event in the current directory",
)
parser.add_option(
"-s",
"--size",
dest="size",
default="1280x640",
help="If plotting/saving a plot, this is the plot's dimensions"
"in pixels (at 80 DPI) in the format XxY [default: 1280x640]",
)
# unpack and initialize data
opts, args = parser.parse_args(argv)
# write log if specified
# if opts.log or opts.fsave:
# log_file.write(f"PingPlot Version {__version__} - Log File\n\n\n")
# start the main loop
print(f"PingPlot Version {__version__} -- by ccampo\n")
print("{0:^23}\n=======================".format("Run Parameters"))
print("{0:>17} {1}".format("Hostname:", opts.host))
print("{0:>17} {1}".format("Ping interval:", str(opts.dt) + " s"))
print("{0:>17} {1}".format("Packets per ping:", opts.n))
print("\n\nPress CTRL+C to quit...\n")
print(
"{0:^15} {1:^15} {2:^15} {3:^15} {4:^15}\n".format(
"AVG. PING", "PACKET LOSS", "NUM. PINGS", "NUM. TIMEOUTS", "TIME ELAPSED"
)
)
while True:
ping = np.array([])
loss = np.array([])
t = np.array([])
now = np.array([])
cnt = 0
save_count = 0
loop_start_time = datetime.datetime.now()
now_time = datetime.datetime.now()
date_str = now_time.isoformat()[:-7][:10]
time_str = f"{now_time.hour}h{now_time.minute}m{now_time.second}s"
stamp = date_str + "_" + time_str
log_name = f"Log\pingplot_v{__version__}_{opts.host}_{stamp}.log"
plot_name = f"Plot\pingplot_v{__version__}_{opts.host}_{stamp}.png"
log_file = open(log_name, "w")
while (datetime.datetime.now() - loop_start_time).total_seconds() / 3600 < 4:
# quit on ctrl+c
try:
ping_idx, loss_idx, out = call_pinger(opts.host, opts.n)
ping = np.append(ping, ping_idx)
loss = np.append(loss, loss_idx)
now = np.append(now, datetime.datetime.now())
t = np.append(t, time.time())
cnt += 1
save_count += 1
log_file.write(
f"{datetime.datetime.now().strftime('%x %X')},{opts.host},{opts.n},{ping_idx},{loss_idx} \n"
)
# get ping data
mean_loss = loss.mean()
nans = np.isnan(ping)
if len(ping[~nans]) > 0:
mean_ping = ping[~np.isnan(ping)].mean()
else:
mean_ping = np.nan
# if opts.log:
# write_log(log_file, out)
# only ping after time dt
time.sleep(opts.dt)
delta_t = datetime.timedelta(seconds=(round(time.time() - t[0], 0)))
sys.stdout.write(
"\r{0:^15.8} {1:^15.10} {2:^15} {3:^15} {4:^15}".format(
str(round(mean_ping, 2)) + " ms",
str(round(mean_loss, 2)) + " %",
cnt * opts.n,
len(ping[nans]),
str(delta_t),
)
)
sys.stdout.flush()
if save_count > 30:
save_count = 0
plt = plot_gen(ping, now, nans, opts.host, opts.plot, opts.size)
plt.savefig(plot_name)
except KeyboardInterrupt:
break
print("\n")
# close log file
print(f"Saved log file {log_name}")
log_file.close()
# make plot to save
# check if any data was collected
if len(ping[~nans]) == 0:
print(
"Error: cannot generate plot; no data collected. Please check your connection."
)
return 2
plt = plot_gen(ping, now, nans, opts.host, opts.plot, opts.size)
print(f"Saved plot {plot_name}")
plt.savefig(plot_name)
# plt.show()
return 2 # exit
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
1730868 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Shared interfaces
"""
import logging as log
from pydoc import locate
from dacite import from_dict
from dataclasses import dataclass, field
from typing import List, Optional, Union, cast
from .common import IDebugDecorator
"""
Component config is either a string of the name of component to load.
Example:
{
"mycomponent" : "my.namespace.MyController",
...
}
Or it is a dictionary with a `name` field and parameter overrides for the defaults.
Example:
{
"mycomponent": {
"name": "my.namespace.MyController",
"someParam": 42.0
},
...
}
"""
ComponentConfig = Union[dict, str]
class IDevice:
@dataclass
class Calibration:
servoOffsets: List[int] = field(default_factory=lambda: [0, 0, 0])
# plate calibration as a percentage of frame size
plateXOffset: float = 0.0
plateYOffset: float = 0.0
ballHue: int = 32 # orange/yellow
rotation: float = -30.0 # the camera sits -30deg rotated from plate coords
@dataclass
class Config:
frequencyHz: float = 30.0
joystickThreshold: float = 0.8
menu_idx: int = 0
debug: bool = False
debugDecorator: Optional[ComponentConfig] = None
sensor: Optional[ComponentConfig] = None
detectors: Optional[ComponentConfig] = None
controller: Optional[ComponentConfig] = None
actuator: Optional[ComponentConfig] = None
def __init__(
self,
config: Config,
calibration: Calibration,
debug_decorator: Optional[IDebugDecorator],
):
self.debug_decorator = debug_decorator
self.config = config
# shared per-machine calibration data
self.calibration = calibration
self.next_device: Optional[str] = None
self.previous_menu: int = self.config.menu_idx
def update(self):
pass
def run(self):
pass
def stop(self):
pass
def set_next_device(self, device_name: str):
self.next_device = device_name
def get_next_device(self) -> Optional[str]:
return self.next_device
def component_from_config(
self, component_config: ComponentConfig
) -> Optional[object]:
"""
Import classes with the following signature,
and construct them with their config class.
class Foo:
@dataclass
class Config:
param1: type = default
param2: type = default
...
            def __init__(self, config: Config, device: "IDevice"):
...
"""
class_name = ""
config = {}
try:
# if we're just a string expand out into a dict with no config
if type(component_config) is str:
class_name = cast(str, component_config)
elif type(component_config) is dict:
class_name = cast(dict, component_config)["name"]
config = component_config
# locate class ref for module.name.Foo and module.name.Foo.Config
class_ref = locate(class_name, forceload=False)
config_ref = locate(class_name + ".Config", forceload=False)
# unpack dict into dataclass
config_inst = from_dict(config_ref, config)
ref = class_ref(config_inst, self) # type: ignore
except Exception as e:
log.exception(f"Error creating class {class_name}\n{e}")
return None
return ref
| StarcoderdataPython |
102292 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import h5py
import sys
if __name__ == '__main__' :
EXIT_FAILURE = 1
EXIT_SUCCESS = 0
# Check if we have a program argument, otherwise terminate
if len(sys.argv) <= 1 :
print("Usage: " + sys.argv[0] + " H5FILE\n")
sys.exit(EXIT_FAILURE)
filename = sys.argv[1]
# Open HDF5 file
h5in = h5py.File(filename, 'r')
try :
pass
finally :
h5in.close()
| StarcoderdataPython |
1771386 | <filename>numpyro/contrib/module.py<gh_stars>1-10
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
from jax import numpy as jnp
import numpyro
from numpyro.distributions.discrete import PRNGIdentity
def flax_module(name, nn, input_shape=None):
"""
Declare a :mod:`~flax` style neural network inside a
model so that its parameters are registered for optimization via
:func:`~numpyro.primitives.param` statements.
:param str name: name of the module to be registered.
:param flax.nn.Module nn: a `flax` Module which has .init and .apply methods
:param tuple input_shape: shape of the input taken by the
neural network.
:return: a callable with bound parameters that takes an array
as an input and returns the neural network transformed output
array.
"""
try:
import flax # noqa: F401
except ImportError:
raise ImportError("Looking like you want to use flax to declare "
"nn modules. This is an experimental feature. "
"You need to install `flax` to be able to use this feature. "
"It can be installed with `pip install flax`.")
module_key = name + '$params'
nn_params = numpyro.param(module_key)
if nn_params is None:
if input_shape is None:
raise ValueError('Valid value for `input_shape` needed to initialize.')
# feed in dummy data to init params
rng_key = numpyro.sample(name + '$rng_key', PRNGIdentity())
_, nn_params = nn.init(rng_key, jnp.ones(input_shape))
numpyro.param(module_key, nn_params)
return partial(nn.call, nn_params)
def haiku_module(name, nn, input_shape=None):
"""
Declare a :mod:`~haiku` style neural network inside a
model so that its parameters are registered for optimization via
:func:`~numpyro.primitives.param` statements.
:param str name: name of the module to be registered.
:param haiku.Module nn: a `haiku` Module which has .init and .apply methods
:param tuple input_shape: shape of the input taken by the
neural network.
:return: a callable with bound parameters that takes an array
as an input and returns the neural network transformed output
array.
"""
try:
import haiku # noqa: F401
except ImportError:
raise ImportError("Looking like you want to use haiku to declare "
"nn modules. This is an experimental feature. "
"You need to install `haiku` to be able to use this feature. "
"It can be installed with `pip install git+https://github.com/deepmind/dm-haiku`.")
module_key = name + '$params'
nn_params = numpyro.param(module_key)
if nn_params is None:
if input_shape is None:
raise ValueError('Valid value for `input_shape` needed to initialize.')
# feed in dummy data to init params
rng_key = numpyro.sample(name + '$rng_key', PRNGIdentity())
nn_params = nn.init(rng_key, jnp.ones(input_shape))
numpyro.param(module_key, nn_params)
return partial(nn.apply, nn_params, None)
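# Hypothetical usage sketch (not part of the original file): inside a numpyro
# model, flax_module / haiku_module register the network's parameters so they are
# optimized along with the rest of the model. `MLP`, `dist` and the shapes below
# are placeholders, not names defined in this module.
#
#   def model(x, y=None):
#       net = flax_module("net", MLP(), input_shape=(1, x.shape[-1]))
#       mu = net(x)
#       numpyro.sample("y", dist.Normal(mu, 1.0), obs=y)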
| StarcoderdataPython |
28436 | from setuptools import setup
setup(
name='torch-dimcheck',
version='0.0.1',
description='Dimensionality annotations for tensor parameters and return values',
packages=['torch_dimcheck'],
author='<NAME>',
author_email='<EMAIL>',
)
| StarcoderdataPython |
153474 | from collections import OrderedDict
from decimal import Decimal, ROUND_DOWN
from models import models
def get_results(name):
results = models.Result.select(
models.Result,
models.Call.accept_ap,
models.Call.override_winner
).where(
(models.Result.level == 'state') | (models.Result.level == 'national') | (models.Result.level == 'district'),
models.Result.officename == name
).order_by(
models.Result.statepostal,
models.Result.seatname,
-models.Result.votecount,
models.Result.last
).join(
models.Call,
on=(models.Call.call_id == models.Result.id)
).dicts()
grouped = OrderedDict()
for result in results:
grouped[result['raceid']] = grouped.get(result['raceid'], []) + [result]
return grouped
def comma_filter(value):
"""
Format a number with commas.
"""
return '{:,}'.format(value)
def percent_filter(value):
"""
Format percentage
"""
value = Decimal(value) * Decimal(100)
if value == 0:
return '0%'
elif value == 100:
return '100%'
elif value > 0 and value < 1:
return '<1%'
else:
cleaned_pct = value.quantize(Decimal('.1'), rounding=ROUND_DOWN)
return '{:.1f}%'.format(cleaned_pct)
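# Illustrative examples (not part of the original module) for the two filters above:
#   comma_filter(1234567)    -> '1,234,567'
#   percent_filter('0.005')  -> '<1%'    (between 0 and 1 percent)
#   percent_filter('0.256')  -> '25.6%'  (rounded down to one decimal place)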
def never_cache_preview(response):
"""
Ensure preview is never cached
"""
response.cache_control.max_age = 0
response.cache_control.no_cache = True
response.cache_control.must_revalidate = True
response.cache_control.no_store = True
return response
def open_db():
"""
Open db connection
"""
if models.db._local.closed:
models.db.connect()
def close_db(response):
"""
Close db connection
"""
models.db.close()
return response
| StarcoderdataPython |
4821612 | from drf_yasg.utils import swagger_auto_schema
from social_django.utils import load_strategy, load_backend
from social_core.exceptions import MissingBackend
from social_core.backends.oauth import BaseOAuth1
from django.http import HttpResponseRedirect
from django.conf import settings
from django.core import mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView,\
CreateAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from .renderers import UserJSONRenderer
from .serializers import (LoginSerializer, RegistrationSerializer,
UserSerializer, SocialAuthenticationSerializer,
CreateEmailVerificationSerializer,
PasswordChangeSerializer,
PasswordResetSerializer, PasswordResetTokenSerializer)
from .utils import validate_image
from authors.apps.core.utils import TokenHandler
from threading import Thread
from .models import User, PasswordResetToken
class RegistrationAPIView(APIView):
"""
post:
Register a new user by creating a new user instance.
All newly registered users will have an email sent
to their email address for verification
"""
# Allow any user (authenticated or not) to hit this endpoint.
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = RegistrationSerializer
@swagger_auto_schema(query_serializer=RegistrationSerializer,
responses={201: UserSerializer()})
def post(self, request):
user = request.data.get('user', {})
# The create serializer, validate serializer, save serializer pattern
# below is common and you will see it a lot throughout this course and
# your own work later on. Get familiar with it.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
user_email = serializer.validated_data['email']
username = serializer.validated_data['username']
callback = {'url': serializer.validated_data['callback_url']}
token_payload = {'email': user_email,
'callback_url': callback['url']}
domain = settings.DOMAIN
token = TokenHandler().create_verification_token(token_payload)
template_name = 'email_verification.html'
context = {'username': username, 'token': token, 'domain': domain}
# https://stackoverflow.com/questions/3005080/how-to-send-html-email-with-django-with-dynamic-content-in-it
html_message = render_to_string(template_name, context)
text_message = strip_tags(html_message)
        thread = Thread(
            target=mail.send_mail,
            args=[
                'Please verify your email',
                text_message,
                settings.FROM_EMAIL,
                [user_email, ],
            ],
            # html_message must be passed by keyword; positionally it would land
            # in send_mail's fail_silently parameter
            kwargs={'html_message': html_message},
        )
thread.setDaemon(True)
thread.start()
message = {
'message': 'Successfully created your account. Please proceed to your email ' + # noqa
user_email + ' to verify your account.'}
serializer.save()
return Response(message, status=status.HTTP_201_CREATED)
class LoginAPIView(APIView):
"""
post:
Login an exising user. Users who have not
verified their accounts should not be
able to log in.
"""
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = LoginSerializer
@swagger_auto_schema(query_serializer=LoginSerializer,
responses={200: UserSerializer()})
def post(self, request):
user = request.data.get('user', {})
# Notice here that we do not call `serializer.save()` like we did for
# the registration endpoint. This is because we don't actually have
# anything to save. Instead, the `validate` method on our serializer
# handles everything we need.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
username = serializer.data.get('username')
instance = User.objects.get(username=username)
user_serializer = UserSerializer(
instance, context={'current_user': request.user})
return Response(user_serializer.data, status=status.HTTP_200_OK)
class UserRetrieveUpdateAPIView(RetrieveUpdateAPIView):
"""
get:
Retrieve details of a user
put:
Update all details of a user
patch:
Update a single detail of a user
"""
permission_classes = (IsAuthenticated,)
renderer_classes = (UserJSONRenderer,)
serializer_class = UserSerializer
def retrieve(self, request, *args, **kwargs):
# There is nothing to validate or save here. Instead, we just want the
# serializer to handle turning our `User` object into something that
# can be JSONified and sent to the client.
serializer = self.serializer_class(
request.user, context={'current_user': request.user})
return Response(serializer.data, status=status.HTTP_200_OK)
def update(self, request, *args, **kwargs):
image = self.request.data.get('image')
validate_image(image)
serializer_data = request.data
user_data = {
'username': serializer_data.get('username', request.user.username),
'email': serializer_data.get('email', request.user.email),
'profile': {
'first_name': serializer_data.get(
                'first_name', request.user.profile.first_name),
'last_name': serializer_data.get(
'last_name', request.user.profile.last_name),
'birth_date': serializer_data.get(
'birth_date', request.user.profile.birth_date),
'bio': serializer_data.get('bio', request.user.profile.bio),
'image': serializer_data.get(
'image', request.user.profile.image),
'city': serializer_data.get(
'city', request.user.profile.city),
'country': serializer_data.get(
'country', request.user.profile.country),
'phone': serializer_data.get(
'phone', request.user.profile.phone),
'website': serializer_data.get(
'website', request.user.profile.website),
}
}
# Here is that serialize, validate, save pattern we talked about
# before.
serializer = self.serializer_class(
request.user, data=user_data, partial=True,
context={'current_user': request.user}
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class SocialAuthenticationView(CreateAPIView):
"""
Login to the site via social authentication
services (Google, Twitter, Facebook)
"""
permission_classes = (AllowAny,)
serializer_class = SocialAuthenticationSerializer
renderer_classes = (UserJSONRenderer,)
def create(self, request):
"""Creates user if not present and returns an authentication token"""
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
provider = serializer.data.get("provider")
authenticated_user = request.user if not \
request.user.is_anonymous else None
strategy = load_strategy(request)
# Load backend associated with the provider
try:
backend = load_backend(
strategy=strategy, name=provider, redirect_uri=None)
access_token = serializer.data.get("access_token")
if isinstance(backend, BaseOAuth1):
access_token = {
'oauth_token': request.data['access_token'],
'oauth_token_secret': request.data['access_token_secret']
}
except MissingBackend:
error_msg = """Provider not supported, Please use 'google-oauth2',
'facebook', or 'twitter'."""
return Response({"error": error_msg},
status=status.HTTP_400_BAD_REQUEST)
try:
user = backend.do_auth(access_token, user=authenticated_user)
except BaseException as error:
return Response({"error": str(error)},
status=status.HTTP_400_BAD_REQUEST)
user.is_verified = True
user.save()
serializer = UserSerializer(
user, context={'current_user': request.user})
serializer.instance = user
return Response(serializer.data, status=status.HTTP_200_OK)
class EmailVerificationView(APIView):
"""We need a view that will handle requests for verifying email adresses"""
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = UserSerializer
def get(self, request, token):
decoded_token = TokenHandler().validate_token(token)
if 'email' not in decoded_token:
return Response(
{'error': 'invalid token'},
status=status.HTTP_400_BAD_REQUEST)
# we check if the user exists and whether they are verified.
# if we don't find a user we raise an error
# if we find a registered user, we raise an error
try:
user = User.objects.get(email=decoded_token['email'])
except User.DoesNotExist:
return Response(
{'email': 'No user with this email has been registered'},
status=status.HTTP_404_NOT_FOUND
)
if user.is_verified is True:
return Response(
{'email': 'This email has already been verified'},
status=status.HTTP_400_BAD_REQUEST
)
user.is_verified = True
user.save()
return HttpResponseRedirect(decoded_token['callback_url'])
class CreateEmailVerificationTokenAPIView(APIView):
"""
This class contains method for creating a new verification
token for registered users
"""
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = CreateEmailVerificationSerializer
def post(self, request):
"""This is the method that will be called when users
want a new verification token."""
data = request.data
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
payload = serializer.create_payload(data)
token = TokenHandler().create_verification_token(payload)
user_email = payload['email']
domain = settings.DOMAIN
template_name = 'email_verification.html'
context = {'username': payload['username'],
'token': token, 'domain': domain}
html_message = render_to_string(template_name, context)
text_message = strip_tags(html_message)
        thread = Thread(
            target=mail.send_mail,
            args=[
                'Please verify your email',
                text_message,
                settings.FROM_EMAIL,
                [user_email, ],
            ],
            # html_message must be passed by keyword; positionally it would land
            # in send_mail's fail_silently parameter
            kwargs={'html_message': html_message},
        )
thread.setDaemon(True)
thread.start()
message = {'message': 'New verification token created. Please proceed to your email ' + # noqa
user_email + ' to verify your account.'}
return Response(message, status=status.HTTP_201_CREATED)
class PasswordResetView(APIView):
"""
post:
Get a user's email where password reset link will be sent.
"""
def post(self, request):
data = request.data.get('payload')
serializer = PasswordResetSerializer(data=data)
serializer.is_valid(raise_exception=True)
user_email = data['email']
callback_url = data['callback_url']
message = "A password reset link has been sent to your email."
try:
user = User.objects.get(email=user_email)
user_id = user.id
payload = {
"email": user_email,
"callback_url": callback_url
}
token = TokenHandler().create_verification_token(payload)
token_data = {
"user": user_id,
"token": token
}
serializer = PasswordResetTokenSerializer(data=token_data)
serializer.is_valid(raise_exception=True)
serializer.save()
TokenHandler().send_password_reset_link(user_email,
token, callback_url)
return Response({"message": message},
status=status.HTTP_200_OK)
except User.DoesNotExist:
return Response(
{"message": message},
status=status.HTTP_200_OK
)
def put(self, request):
"""
put:
Update a user's password with a new password.
"""
try:
data = request.data.get('user_password')
serializer = PasswordChangeSerializer(data=data)
serializer.is_valid(raise_exception=True)
token = data['token']
user = PasswordResetToken.objects.get(token=token)
is_valid = user.is_valid
if is_valid:
credentials = TokenHandler().validate_token(token)
password = data['password']
confirm_password = data['confirm_password']
                if password != confirm_password:
                    return Response({"message": "Passwords do not match"},
                                    status=status.HTTP_400_BAD_REQUEST)
                serializer = PasswordChangeSerializer(data=data)
                serializer.is_valid(raise_exception=True)
                serializer = PasswordChangeSerializer(instance=User,
                                                      data=data, partial=True)
                # "serializer" is rebound below to the User instance whose
                # password is being updated
                serializer = User.objects.get(email=credentials['email'])
                serializer.set_password(password)
                serializer.save()
user.is_valid = False
user.save()
return Response(
{'message': 'Your password has been changed.'},
status=status.HTTP_202_ACCEPTED)
else:
return Response(
{'message': 'Sorry, we couldn\'t find that password reset'
' key in our database. Please send another request.'},
status=status.HTTP_404_NOT_FOUND
)
except PasswordResetToken.DoesNotExist:
return Response(
{'message': 'A user with the given token does not exist.'},
status=status.HTTP_404_NOT_FOUND
)
| StarcoderdataPython |
3248652 | <reponame>feifeigood/watch<gh_stars>1-10
import threading
from datetime import datetime
from time import sleep
from cx_Oracle import DatabaseError, OperationalError
from watch import app, lock, notification_pool, task_pool, unsent_pool
from watch.utils.chat_bot import send_message
from watch.utils.manage_message import t_italic
from watch.utils.parse_args import get_offset
def check_dnd_time():
if not app.config['DND_HOURS']:
return False
start_dnd_hour = app.config['DND_HOURS'][0]
end_dnd_hour = app.config['DND_HOURS'][1]
now_hour = datetime.now().hour
if start_dnd_hour < end_dnd_hour and start_dnd_hour <= now_hour < end_dnd_hour:
return True
if start_dnd_hour >= end_dnd_hour and (now_hour >= start_dnd_hour or now_hour < end_dnd_hour):
return True
return False
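# Worked example (not part of the original file) of the two branches above:
#   DND_HOURS = [1, 6]  -> quiet from 01:00 up to (not including) 06:00
#   DND_HOURS = [22, 7] -> wrap-around case: quiet from 22:00 through 06:59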
def prepare_and_send(chat_id, reply_to_message_id, message, sound):
message_parameters = {'chat_id': chat_id
, 'text': message
, 'parse_mode': 'HTML'
, 'disable_web_page_preview': 'true'}
if reply_to_message_id:
message_parameters['reply_to_message_id'] = reply_to_message_id
if sound == 'no':
message_parameters['disable_notification'] = 'true'
elif sound == 'default':
if check_dnd_time():
message_parameters['disable_notification'] = 'true'
return send_message(message_parameters)
class Worker(threading.Thread):
def __init__(self):
super(Worker, self).__init__()
self.active = True
def run(self):
while self.active:
            # Now dnd means "send with no sound". Formerly tasks were not processed during dnd hours.
# if check_dnd_time():
# sleep(app.config['WORKER_FREQ_SEC'])
# continue
with lock:
active_tasks = tuple(t for t in sorted(task_pool.values()
, key=lambda x: x.priority) if t.state == 'wait')
for task in active_tasks:
with lock:
if not task_pool.get(task.uuid):
continue
if task.last_call:
pt = task.period[-1:]
pv = task.period[:-1]
next_call = task.last_call + get_offset(pv, pt)
if next_call > datetime.now():
continue
task.state = 'run'
try:
task_start_time = datetime.now()
message = app.view_functions[task.endpoint](task)
r = 0
if message:
if task.text:
message = f'{t_italic(task.text)}\n{message}'
notification_pool.appendleft((datetime.now()
, task.uuid
, task.name
, message))
if task.chat_id and not app.config['MUTE_MESSAGES']:
r = prepare_and_send(task.chat_id, task.reply_to_message_id, message, task.sound)
if r != 0:
unsent_pool.appendleft((datetime.now()
, task.uuid
, task.name
, task.chat_id
, task.reply_to_message_id
, message))
if task.finished:
del task_pool[task.uuid]
else:
task.last_call = datetime.now()
task.duration = (task.last_call - task_start_time).seconds
task.execs += 1
task.state = 'wait' if r == 0 or not app.config['FAIL_TASK_ON_MSG_ERROR'] else 'msg error'
# retry sending even if prev msg had no recipient
if r == 0 and message and not app.config['MUTE_MESSAGES']:
while r == 0 and len(unsent_pool) > 0:
m = unsent_pool.popleft()
r = prepare_and_send(m[3]
, m[4]
, f'{t_italic("This message was postponed due to network problem")}'
f'\n{m[5]}'
, 'default')
if r == 0 and task_pool.get(m[1], None):
task_pool[m[1]].state = 'wait'
if r != 0:
unsent_pool.appendleft(m)
except (DatabaseError, OperationalError) as e:
app.logger.error(f'{task.uuid} {e.args[0].message}')
task.state = 'db error'
if app.config['SLEEP_ON_FAIL_SEC'] > 0 and e.args[0].code in (12170, 3113):
app.logger.error(f"Sleeping for {app.config['SLEEP_ON_FAIL_SEC']} seconds...")
sleep(app.config['SLEEP_ON_FAIL_SEC'])
break
sleep(app.config['WORKER_FREQ_SEC'])
def shutdown(self):
self.active = False
| StarcoderdataPython |
3380263 | import sys
import argparse
import numpy as np
import json
from plot_config import * # plot configuration file
width = 4.5 # default_width
height = 3.5 # default_height
def plot_runtime(x, y, z, group_labels, group_size, nolegend=False, mnist=False):
######################## PLOT CODE ########################
ax = plt.figure().gca()
ax.yaxis.grid(color=gridcolor, linestyle=linestyle, linewidth=0.5)
ax.xaxis.grid(color=gridcolor, linestyle=linestyle, linewidth=0.5)
fig = matplotlib.pyplot.gcf()
if mnist:
fig.set_size_inches(width * 1.5, height)
else:
fig.set_size_inches(width, height)
xticks = np.sort(np.unique(x))
secondary_labels = ['Parallelized', '1 CPU']
line_styles = ['solid', 'dashdot']
group_number = 0
for i in range(0, len(y), group_size):
sort = np.argsort(x[i:i+group_size])
# multi-core plot
ax.plot(
x[i:i+group_size][sort],
y[:,0][i:i+group_size][sort],
marker=markers[0],
color=colors[group_number],
lw=linewidth,
ls=line_styles[0],
)
plt.fill_between(
x[i:i+group_size][sort],
y[:,0][i:i+group_size][sort] - y[:,1][i:i+group_size][sort],
y[:,0][i:i+group_size][sort] + y[:,1][i:i+group_size][sort],
color=colors[group_number],
alpha=error_opacity,
linewidth=0,
)
# set the "ghost" labels
ax.plot(np.NaN, np.NaN,
label=group_labels[group_number],
marker=markers[0],
color=colors[group_number],
lw=linewidth,
ls=line_styles[0],
)
# compute single core time
server_time = z[:,0][i:i+group_size][sort][0] # server time on one table
num_tables = x[i:i+group_size][sort]
server_time_single = server_time * num_tables
other_latency = y[:,0][i:i+group_size][sort] - server_time
px = x[i:i+group_size][sort]
py = other_latency + server_time_single
if mnist:
# single core plot
ax.plot(
px,
py,
marker=markers[0],
color=colors[group_number],
lw=linewidth,
ls=line_styles[1],
)
group_number += 1
if not nolegend:
ax2 = ax.twinx() # twin ghost axis to overlay
ax2.plot(np.NaN, np.NaN,
label=secondary_labels[0],
ls=line_styles[0],
lw=linewidth,
color='black',
)
ax2.plot(np.NaN, np.NaN,
label=secondary_labels[1],
ls=line_styles[1],
lw=linewidth,
color='black',
)
ax2.get_yaxis().set_visible(False)
nc = 2
if mnist:
nc = 1 # mnist doesn't have 500 probes
ax.legend(title='Probes', ncol=nc, fancybox=False, loc='upper left', framealpha=0.95, edgecolor=edgecolor)
if mnist:
ax2.legend(loc='upper center', fancybox=False, framealpha=0.95, edgecolor=edgecolor, handlelength=2.5)
ax.set_xticks(xticks)
return ax
if __name__ == '__main__':
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument("--file", type=str, default='')
argparser.add_argument("--cap", type=int, default=4)
argparser.add_argument("--mnist", type=bool, nargs='?', const=True, default=False)
argparser.add_argument("--nolegend", type=bool, nargs='?', const=True, default=False)
args = argparser.parse_args()
if args.mnist:
colors = colors[1:]
# read experiment file (expected json)
with open(args.file, 'r') as myfile:
data=myfile.read()
# parse the experiment file as json
results = json.loads(data)
num_tables = []
num_probes = []
server_time_ms = []
client_latency_ms = []
bandwidth_up_bytes = []
bandwidth_down_bytes = []
bandwidth_total_bytes = []
num_results = 0
num_trials = len(results[0]["query_up_bandwidth_bytes"])
dataset = results[0]["dataset_name"]
# first we extract the relevent bits
for i in range(len(results)):
# skip these; they don't look good in the plots
if results[i]["num_probes"] == 5 or results[i]["num_probes"] == 50:
continue
num_probes.append(results[i]["num_probes"])
num_tables.append(results[i]["num_tables"])
# extract and compute bandwidth statistics
bandwidth_up = np.array(results[i]["query_up_bandwidth_bytes"])
bandwidth_down = np.array(results[i]["query_down_bandwidth_bytes"])
bandwidth_total = bandwidth_up + bandwidth_down
avg = np.mean(bandwidth_total)
std = np.std(bandwidth_total)
bandwidth_total_bytes.append([avg, confidence95(std, num_trials)])
avg_up = np.mean(bandwidth_up)
std_up = np.std(bandwidth_up)
bandwidth_up_bytes.append([avg_up, confidence95(std_up, num_trials)])
avg_down = np.mean(bandwidth_down)
std_down = np.std(bandwidth_down)
bandwidth_down_bytes.append([avg_down, confidence95(std_down, num_trials)])
avg = np.mean(results[i]["query_client_ms"])
std = np.std(results[i]["query_client_ms"])
client_latency_ms.append([avg, confidence95(std, num_trials)])
server_total = np.array(results[i]["dpf_server_ms"]) + np.array(results[i]["masking_server_us"])*MICRO_TO_MILLI
avg = np.mean(server_total)
std = np.std(server_total)
server_time_ms.append([avg, confidence95(std, num_trials)])
num_results += 1
# convert everything to numpy arrays
num_tables = np.array(num_tables)
num_probes = np.array(num_probes)
server_time_ms = np.array(server_time_ms)
client_latency_ms = np.array(client_latency_ms)
bandwidth_total_bytes = np.array(bandwidth_total_bytes)
bandwidth_down_bytes = np.array(bandwidth_down_bytes)
bandwidth_up_bytes = np.array(bandwidth_up_bytes)
group_size = len(np.unique(num_tables))
# make the ANN processing as a function of table size plots
sort = np.argsort(num_probes)
num_tables = num_tables[sort][:args.cap * group_size]
num_probes = num_probes[sort][:args.cap * group_size]
server_time_ms = server_time_ms[sort][:args.cap * group_size]
client_latency_ms = client_latency_ms[sort][:args.cap * group_size]
bandwidth_total_bytes = bandwidth_total_bytes[sort][:args.cap * group_size]
bandwidth_down_bytes = bandwidth_down_bytes[sort][:args.cap * group_size]
bandwidth_up_bytes = bandwidth_up_bytes[sort][:args.cap * group_size]
# figure out how many different groups we have
group_size = len(np.unique(num_tables))
group_labels = np.array([str(i) for i in np.unique(num_probes)])
# plot client end-to-end time
ax = plot_runtime(num_tables, client_latency_ms*MILLI_TO_SECONDS, server_time_ms*MILLI_TO_SECONDS, group_labels, group_size, args.nolegend, args.mnist)
ax.set_xlabel('Number of hash tables')
ax.set_ylabel('Client latency (seconds)')
#ax.set_yscale("log", base=10)
ax.set_ylim(0, ax.get_ylim()[1] * 1.25) # make y axis 25% bigger
ax.set_title(dataset.upper() + " dataset")
ax.figure.tight_layout()
ax.figure.savefig(dataset + '_latency_client.pdf', bbox_inches='tight')
sort = np.argsort(num_tables)
group_size_probes = len(np.unique(num_probes))
print("Bandwidth (kB) per #probes (1 table): " + str(np.sort(bandwidth_total_bytes[:,0][sort][0:group_size_probes])*BYTES_TO_KB))
| StarcoderdataPython |
1740590 | <gh_stars>100-1000
##########################################################################
#
# Copyright (c) 2011-2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class AuxiliaryNodeGadgetTest( GafferUITest.TestCase ) :
def testContents( self ) :
n = Gaffer.Node()
g = GafferUI.AuxiliaryNodeGadget( n )
self.assertFalse( g.getContents() )
def testNodules( self ) :
# Test a bunch of things not supported on AuxiliaryGadgets, just to make sure that they return
# None instead of crashing
n = Gaffer.Node()
n["i"] = Gaffer.IntPlug()
g = GafferUI.AuxiliaryNodeGadget( n )
self.assertFalse( g.nodule( n["i"] ) )
def testNoduleTangents( self ) :
n = GafferTest.AddNode()
g = GafferUI.AuxiliaryNodeGadget( n )
self.assertEqual( g.connectionTangent( g.nodule( n["op1"] ) ), imath.V3f( 0, 0, 0 ) )
def testEdgeGadgets( self ) :
n = GafferTest.MultiplyNode()
g = GafferUI.AuxiliaryNodeGadget( n )
for name, edge in g.Edge.names.items() :
self.assertTrue( g.getEdgeGadget( edge ) is None )
eg = GafferUI.TextGadget( name )
g.setEdgeGadget( edge, eg )
self.assertTrue( g.getEdgeGadget( edge ) is None )
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3219875 | <filename>sympy/physics/quantum/tests/test_dagger.py
from sympy import I, Matrix, symbols, conjugate, Expr, Integer
from sympy.physics.quantum.dagger import adjoint, Dagger
from sympy.external import import_module
from sympy.testing.pytest import skip
def test_scalars():
x = symbols('x', complex=True)
assert Dagger(x) == conjugate(x)
assert Dagger(I*x) == -I*conjugate(x)
i = symbols('i', real=True)
assert Dagger(i) == i
p = symbols('p')
assert isinstance(Dagger(p), adjoint)
i = Integer(3)
assert Dagger(i) == i
A = symbols('A', commutative=False)
assert Dagger(A).is_commutative is False
def test_matrix():
x = symbols('x')
m = Matrix([[I, x*I], [2, 4]])
assert Dagger(m) == m.H
class Foo(Expr):
def _eval_adjoint(self):
return I
def test_eval_adjoint():
f = Foo()
d = Dagger(f)
assert d == I
np = import_module('numpy')
def test_numpy_dagger():
if not np:
skip("numpy not installed.")
a = np.matrix([[1.0, 2.0j], [-1.0j, 2.0]])
adag = a.copy().transpose().conjugate()
assert (Dagger(a) == adag).all()
scipy = import_module('scipy', import_kwargs={'fromlist': ['sparse']})
def test_scipy_sparse_dagger():
if not np:
skip("numpy not installed.")
if not scipy:
skip("scipy not installed.")
else:
sparse = scipy.sparse
a = sparse.csr_matrix([[1.0 + 0.0j, 2.0j], [-1.0j, 2.0 + 0.0j]])
adag = a.copy().transpose().conjugate()
assert np.linalg.norm((Dagger(a) - adag).todense()) == 0.0
| StarcoderdataPython |
1659435 | from pyramid.config import Configurator
from clld.interfaces import IMapMarker
from clld.web.icon import ICON_MAP
"""
Even if not used, these models should still be imported. The original comment:
we must make sure custom models are known at database initialization!
"""
from northeuralex import models
"""
An ugly hack to register the following strings for i18n/l10n so that the model
names change in the templates. Ugh!
This is how the problem of renaming the models is handled in other clld apps
(e.g. asjp and wals) as well.
"""
_ = lambda x: x
_('Parameter')
_('Parameters')
"""
Dictionary mapping language families to clld.web.icon.Icon instances. Used in
the get_map_marker hook.
"""
FAMILY_ICONS = {
'Uralic': ICON_MAP['c0000dd'],
'Indo-European': ICON_MAP['c009900'],
'Turkic': ICON_MAP['c990099'],
'Mongolic': ICON_MAP['cdd0000'],
'Tungusic': ICON_MAP['cffff00'],
'Yeniseian': ICON_MAP['cffffff'],
'Yukaghir': ICON_MAP['c00ff00'],
'Chukotko-Kamchatkan': ICON_MAP['c00ffff'],
'Nivkh': ICON_MAP['ccccccc'],
'Ainu': ICON_MAP['cff6600'],
'Koreanic': ICON_MAP['s0000dd'],
'Japonic': ICON_MAP['s009900'],
'Eskimo-Aleut': ICON_MAP['s990099'],
'Dravidian': ICON_MAP['sdd0000'],
'Burushaski': ICON_MAP['sffff00'],
'Kartvelian': ICON_MAP['sffffff'],
'Basque': ICON_MAP['s00ff00'],
'Abkhaz-Adyge': ICON_MAP['s00ffff'],
'Nakh-Daghestanian': ICON_MAP['scccccc'],
'Afro-Asiatic': ICON_MAP['sff6600'],
'Sino-Tibetan': ICON_MAP['t0000dd'],
'_default': ICON_MAP['cff6600'] }
def get_map_marker(item, req):
"""
Hook called for each marker on each map. Determines the map marker for the
given item (the latter would be an instance of a different class depending
on the map). Returns the URL of the selected map marker.
In other words, makes sure that each marker on a map would consistently use
the same icon depending on the language family.
The idea how to achieve different markers for different language families
was stolen from the __init__ module of the sails clld project.
"""
family = None
if isinstance(item, models.Doculect):
family = item.family
elif isinstance(item, models.Synset):
family = item.language.family
if family not in FAMILY_ICONS:
family = '_default'
return FAMILY_ICONS[family].url(req)
def main(global_config, **settings):
"""
Returns a Pyramid WSGI application. Apart from the clld boilerplate, it
orders the home sub-navigation and registers the get_map_marker hook.
"""
config = Configurator(settings=settings)
config.include('clld.web.app')
config.registry.settings['home_comp'] = ['help', 'download', 'legal', 'contact']
config.registry.registerUtility(get_map_marker, IMapMarker)
return config.make_wsgi_app()
| StarcoderdataPython |
117868 | <reponame>haihala/space-tavern
from constants import CONFPATH
from engine import Engine
from json import load, dump
from os import mkdir
import sys
from os.path import isdir, isfile, dirname
def load_config():
with open(CONFPATH) as f:
return load(f)
def main(resolution):
engine = Engine(load_config(), resolution)
engine.run()
if __name__=="__main__":
main([int(i) for i in sys.argv[1:3]])
| StarcoderdataPython |
1701027 | <reponame>k4rrot/escrow-api<filename>api/escrow.py
from datetime import datetime
from flask import current_app, Blueprint, request, make_response, jsonify
from mongoengine.errors import DoesNotExist
import requests
from models.record import EscrowRecord
escrow = Blueprint('escrow', __name__, url_prefix='/escrow')
def resolve_escrow_record(record: EscrowRecord):
disclose = True if record.release_date else False
return {
'id': str(record.id),
'name': record.name,
'data': record.key if disclose else None,
'payment_address': record.payment_address,
'release_amount': record.release_amount,
'create_date': record.create_date.isoformat(),
'release_date': record.release_date.isoformat() if disclose else None,
}
def check_address(address: str, amount: float, method: str):
if method == 'sochain':
resp = requests.get(
'https://sochain.com/api/v2/get_address_balance/BTC/{}'.format(
address
)
)
if resp.status_code != 200:
return False
balance = float(
resp.json().get('data', {}).get('confirmed_balance', 0)
)
if balance >= amount:
return True
# Default to unverified
return False
@escrow.route('/', defaults={'escrow_id': None}, methods=['GET'])
@escrow.route('/<escrow_id>', methods=['GET'])
def get_escrow(escrow_id):
# get all escrow records
if not escrow_id:
skip = request.args.get('skip', 0)
take = request.args.get('take', 100)
all_records = EscrowRecord.objects() \
.order_by('create_date') \
.skip(skip).limit(take)
return make_response(
jsonify([resolve_escrow_record(i) for i in all_records]),
200,
)
# Attempt to retrieve record
try:
record = EscrowRecord.objects.get(id=escrow_id)
if check_address(
record.payment_address,
record.release_amount,
current_app.config.get(
'ADDRESS_VERIFY_METHOD',
),
):
# Release key
record.modify(release_date=datetime.now())
return make_response(
jsonify(
resolve_escrow_record(record)
),
200
)
else:
return make_response(
jsonify(
resolve_escrow_record(record)
),
200
)
except DoesNotExist:
return make_response('', 404)
@escrow.route('/', methods=['POST'])
def create_escrow():
    args = request.json
    name = args.get('name')
    escrow = {
        'name': name[:256] if name is not None else None,
        'key': args.get('data'),
        'payment_address': args.get('address'),
        'release_amount': args.get('amount'),
    }
    # reject the request when any required field is missing (check the values,
    # not the keys, which are always the same strings)
    if None in escrow.values():
        return make_response(
            jsonify(
                {'err': 'name, data, address, and amount are required'}
            ),
            400,
        )
new_escrow = EscrowRecord(**escrow)
new_escrow.save()
escrow['id'] = str(new_escrow.id)
return make_response(
jsonify(escrow),
200,
)
| StarcoderdataPython |
147977 | #!/usr/bin/python3
# Transfer a vector graphic (.svg) into LEdit code (.tco)
# usage: python3 svg2tco.py [-s SHIFTX SHIFTY] MASKFILENAME > OUTPUTCODE.tco
# NOTICE: Currently, only boxes(rectangles) & polygons will work.
import xml.dom.minidom
import sys
def polygonProcess(pgpcps): # polygons and paths share this drawing code, so it lives in one helper
pgpnum=int(len(pgpcps)/2); # number of points
# output
print("polygon ",end=''); # LEDIT command
for pgpit in range(0,pgpnum):
pgx=float(pgpcps[pgpit*2]);
pgy=float(pgpcps[pgpit*2+1]);
# Fix svg cood: (1,-1) * All cood + (0,1052.3622)
pgy=1052.3622-pgy;
# rotate 180: (900,900) - All cood
#pgx=900.-pgx;
#pgy=900.-pgy;
# add shift
pgx=pgx+shiftx;
pgy=pgy+shifty;
# output
print("!%.3f !%.3f " % (pgx,pgy),end=''); # LEDIT script
print("\x0D");
if len(sys.argv)<2:
raise(Exception('Error: no input'));
shiftx=0.;
shifty=0.;
if sys.argv[1].startswith('-'):
option=sys.argv[1][1];
if option=='s':
if len(sys.argv)<5:
raise(Exception('Error: not enough coordinate parameters to shift'));
shiftx=float(sys.argv[2]);
shifty=float(sys.argv[3]);
svgFileName=sys.argv[4];
elif option=='h':
        print('# Transfer a vector graphic (.svg) into LEdit code (.tco)\n# usage: python3 svg2tco.py [-s SHIFTX SHIFTY] MASKFILENAME > OUTPUTCODE.tco\n# NOTICE: Currently, only boxes(rectangles) & polygons will work.');
sys.exit();
else:
raise(Exception('Error: unknown option.'));
sys.exit();
else:
svgFileName=sys.argv[1];
dom = xml.dom.minidom.parse(svgFileName.replace("\n",""));
root = dom.documentElement;
rts = root.getElementsByTagName('rect');
pgs = root.getElementsByTagName('polygon');
pts = root.getElementsByTagName('path');
for rt in rts: # rectangles
rtx=float(rt.getAttribute("x"));
rty=float(rt.getAttribute("y"));
rtw=float(rt.getAttribute("width"));
rth=float(rt.getAttribute("height"));
# Fix svg cood: (1,-1) * All cood + (0,1052.3622)
rty=1052.3622-rty-rth;
# rotate 180: (900,900) - All cood
#rtx=900.-rtx-rtw;
#rty=900.-rty-rth;
# add shift
rtx=rtx+shiftx;
rty=rty+shifty;
# use box center as (x,y)
rtx=rtx+rtw/2.;
rty=rty+rth/2.;
# output
print("box %.3f %.3f !%.3f !%.3f\x0D" % (rtw,rth,rtx,rty)); # LEDIT script
for pg in pgs: # polygons
pgps=pg.getAttribute("points");
pgps=pgps.replace(","," "); # polygon points
pgpcps=pgps.split(); # polygon points coordinate pairs
if (len(pgpcps)%2!=0): # unpaired coordinates of points
pgid="";
try:
pgid=pg.getAttribute("id");
except Exception:
            pass;  # "noop" is undefined in Python; pass is the intended no-op
raise(Exception("Error at polygon "+pgid+": unpaired coordinates."));
polygonProcess(pgpcps);
for pt in pts: # paths (currently polygon only)
ptps=pt.getAttribute("d");
paras=ptps.split(); # commands and parameters
pgset=[]; # set of polygons
pgpcps=[]; # current polygon (described by polygon points coordinate pairs)
paranum=len(paras);
paraiter=0;
com=''; # current command
pos=[0.,0.]; # current position
try:
while (paraiter<paranum):
if (len(paras[paraiter])==1): # command
com=paras[paraiter];
if (com=='z' or com=='Z'): # close path
if (len(pgpcps)>0): # save polygon
pgset.append(pgpcps);
pgpcps=[];
else: # coords
tmpcoord=paras[paraiter].split(',');
if (com=='m'): # move to relative position
if (len(tmpcoord)!=2):
raise(Exception('Error: 2D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
pos=[float(tmpcoord[0])+pos[0],float(tmpcoord[1])+pos[1]];
if (len(pgpcps)>0): # save the last one (this should be done by command 'Z' or 'z'. but for now we treat all paths as polygons. maybe we should not do this here in future versions.)
pgset.append(pgpcps);
pgpcps=pos;
com='l';
elif (com=='M'): # move to absolute position
if (len(tmpcoord)!=2):
raise(Exception('Error: 2D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
pos=[float(tmpcoord[0]),float(tmpcoord[1])];
if (len(pgpcps)>0): # save the last one (this should be done by command 'Z' or 'z'. but for now we treat all paths as polygons. maybe we should not do this here in future versions.)
pgset.append(pgpcps);
pgpcps=pos;
com='L';
elif (com=='l'): # line to relative position
if (len(tmpcoord)!=2):
raise(Exception('Error: 2D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
pos=[float(tmpcoord[0])+pos[0],float(tmpcoord[1])+pos[1]];
pgpcps.extend(pos);
elif (com=='L'): # line to absolute position
if (len(tmpcoord)!=2):
raise(Exception('Error: 2D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
pos=[float(tmpcoord[0]),float(tmpcoord[1])];
pgpcps.extend(pos);
                elif (com=='v'): # vertical line to relative position
                    if (len(tmpcoord)!=1):
                        raise(Exception('Error: 1D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
                    pos[1]=float(tmpcoord[0])+pos[1]; # the single coordinate is at index 0
                    pgpcps.extend(pos);
                elif (com=='V'): # vertical line to absolute position
                    if (len(tmpcoord)!=1):
                        raise(Exception('Error: 1D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
                    pos[1]=float(tmpcoord[0]); # the single coordinate is at index 0
                    pgpcps.extend(pos);
                elif (com=='h'): # horizontal line to relative position
                    if (len(tmpcoord)!=1):
                        raise(Exception('Error: 1D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
                    pos[0]=float(tmpcoord[0])+pos[0];
                    pgpcps.extend(pos);
                elif (com=='H'): # horizontal line to absolute position
                    if (len(tmpcoord)!=1):
                        raise(Exception('Error: 1D coordinate needed, '+str(len(tmpcoord))+'D inputed.'));
                    pos[0]=float(tmpcoord[0]);
                    pgpcps.extend(pos);
                elif (com=='z' or com=='Z'): # close path
                    raise(Exception('Error: Z command need no coordinates, '+str(len(tmpcoord))+'D inputed.'));
                else: # curves are not supported yet.
                    raise(Exception('Error: unsupported command '+com+', '+str(len(tmpcoord))+'D coordinate inputed.'));
paraiter+=1;
except Exception as e:
print(str(e),file=sys.stderr);
for pgpcps in pgset: # polygon points coordinate pairs
if (len(pgpcps)%2!=0): # unpaired coordinates of points
raise(Exception("Internal Error: unpaired coordinates. -> This should never happen. If it happens, then there is a bug."));
polygonProcess(pgpcps);
| StarcoderdataPython |
4838775 | <filename>stubs.min/System/Windows/Forms/__init___parts/PropertyValueChangedEventArgs.py<gh_stars>1-10
class PropertyValueChangedEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.PropertyGrid.PropertyValueChanged event of a System.Windows.Forms.PropertyGrid.
PropertyValueChangedEventArgs(changedItem: GridItem,oldValue: object)
"""
@staticmethod
def __new__(self,changedItem,oldValue):
""" __new__(cls: type,changedItem: GridItem,oldValue: object) """
pass
ChangedItem=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.Forms.GridItem that was changed.
Get: ChangedItem(self: PropertyValueChangedEventArgs) -> GridItem
"""
OldValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The value of the grid item before it was changed.
Get: OldValue(self: PropertyValueChangedEventArgs) -> object
"""
| StarcoderdataPython |
1777700 | <filename>pop_elements_even_index.py
''' write a function that pops all elements at even indexes
'''
def pop_elements(alist):
    # iterate from the end so that popping does not shift the even indexes
    # we have not visited yet (and cannot run past the shrinking list)
    for i in range(len(alist) - 1, -1, -1):
        if i % 2 == 0:
            alist.pop(i)
    return alist
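# Worked example (not in the original file): elements at indexes 0, 2 and 4 are
# removed, so pop_elements([10, 11, 12, 13, 14]) returns [11, 13].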
| StarcoderdataPython |
3375913 | """Utilities for constructing a metric
"""
import functools
import itertools
from typing import Tuple, Union, List
import sympy
from sympy import Function, sin, Expr, Array, Derivative as D, MatrixBase, Matrix, Symbol
from sympy.diffgeom import twoform_to_matrix
from sympy.printing.latex import latex
from pystein import coords, symbols
from pystein import constants
from pystein.constants import c
from pystein.utilities import tensor_pow as tpow, matrix_to_twoform
class Metric:
"""Metric represents a twoform on a manifold that is symmetric. This class is capable of being
created from and converted to both twoform notation and matrix notation given a coordinate system.
"""
def __init__(self, twoform: Expr = None, matrix: Array = None, coord_system: coords.CoordSystem = None,
components: Tuple[Expr, ...] = None):
"""Create a Metric"""
if twoform is None and matrix is None:
raise ValueError('Must specify either twoform or matrix to produce metric')
# Construct twoform if none given
if twoform is None:
if not isinstance(matrix, MatrixBase):
matrix = Matrix(matrix)
if coord_system is None:
raise ValueError('Must specify coord system if constructing metric from matrix')
twoform = matrix_to_twoform(matrix, coord_system.base_oneforms()) # TODO check ordering of base oneforms?
# Construct matrix if none given
if matrix is None:
matrix = twoform_to_matrix(twoform)
coord_system = coords.CoordSystem.from_twoform(twoform)
# Set instance attributes
self._twoform = twoform
self._matrix = matrix
self._inverse = None # lazy caching of inverse matrix
self.coord_system = coord_system
self.components = components
def __repr__(self):
"""String repr"""
return repr(self.twoform)
def _repr_latex_(self):
"""LaTeX repr in Jupyter"""
s = latex(self.twoform, mode='plain')
return "$\\displaystyle %s$" % s
@property
def twoform(self):
"""Safe accessor for twoform"""
return self._twoform
@property
def matrix(self):
"""Safe accessor for matrix"""
return self._matrix
@property
def inverse(self): # TODO include method parameters in here, for instance pseudo inverse
"""Compute the inverse metric (if possible) and return new Metric instance"""
if self._inverse is None:
self._inverse = self.matrix.inv() # only compute once
return Metric(matrix=self._inverse, coord_system=self.coord_system, components=self.components)
def subs(self, *args, **kwargs):
"""Pass thru to twoform substitution"""
return Metric(twoform=self.twoform.subs(*args, **kwargs),
# TODO make the filtering below more robust
components=tuple(c for c in self.components if not c.subs(*args, **kwargs).doit().is_constant()))
def inner_product(self, vec1: List[sympy.Expr], vec2: List[sympy.Expr]):
N = len(self.coord_system.base_symbols())
res = 0
for mu in range(N):
for nu in range(N):
res += self._matrix[mu, nu] * vec1[mu] * vec2[nu]
return res
def norm(self, vec: List[sympy.Expr]):
return sympy.sqrt(self.inner_product(vec, vec))
def angle(self, vec1, vec2):
norm_ip = self.inner_product(vec1, vec2) / self.norm(vec1) / self.norm(vec2)
return sympy.acos(norm_ip)
def minkowski():
"""Utility for constructing the Minkowski metric
Returns:
Metric, the Minkowski metric for flat space
References:
[1] <NAME>, Cosmology (Oxford University Press, Oxford ; New York, 2008).
"""
cs = coords.cartesian_coords()
dt, dx, dy, dz = cs.base_oneforms()
form = - tpow(dt, 2) + tpow(dx, 2) + tpow(dy, 2) + tpow(dz, 2)
return Metric(twoform=form)
def friedmann_lemaitre_roberston_walker(curvature_constant: Symbol = symbols.k, cartesian: bool = False):
"""Utility for constructing the FLRW metric in terms of a unit lapse and general
scale function `a`.
Args:
curvature_constant:
Symbol, default "k", the curvature parameter in reduced polar coordinates
cartesian:
bool, default False. If true create a cartesian FLRW and ignore curvature_constant argument
Returns:
Metric, the FLRW metric
References:
[1] <NAME>, Cosmology (Oxford University Press, Oxford ; New York, 2008).
"""
a = Function('a')(symbols.t)
if cartesian:
cs = coords.cartesian_coords()
dt, dx, dy, dz = cs.base_oneforms()
form = - c ** 2 * tpow(dt, 2) + a ** 2 * (tpow(dx, 2) + tpow(dy, 2) + tpow(dz, 2))
else:
cs = coords.toroidal_coords()
_, r, theta, _ = cs.base_symbols()
dt, dr, dtheta, dphi = cs.base_oneforms()
form = - c ** 2 * tpow(dt, 2) + a ** 2 * (1 / (1 - curvature_constant * r ** 2) * tpow(dr, 2) + r ** 2 * (tpow(dtheta, 2) + sin(theta) ** 2 * tpow(dphi, 2)))
return Metric(twoform=form, components=(a,))
flrw = friedmann_lemaitre_roberston_walker # shorthand for conventional names
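# Illustrative usage sketch (assumed from this module only, not original code):
#   metric = flrw()            # FLRW metric with curvature symbol k and scale factor a(t)
#   metric.twoform             # symbolic line element
#   metric.inverse.matrix      # inverse metric components
#   metric.components          # (a(t),)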
def general_inhomogeneous_isotropic(use_natural_units: bool = True):
"""Utility for constructing a general inhomogeneous, but still isotropic, metric (GIIM). The metric
components M, N, L, S all depend on time and radius, but not theta or phi (hence isotropy).
Returns:
Metric, the GIIM metric
"""
cs = coords.toroidal_coords()
t, r, theta, _ = cs.base_symbols()
dt, dr, dtheta, dphi = cs.base_oneforms()
# Create generic isotropic metric component functions
M = Function('M')(t, r)
N = Function('N')(t, r)
L = Function('L')(t, r)
S = Function('S')(t, r)
form = - c ** 2 * N ** 2 * tpow(dt, 2) + \
L ** 2 * tpow(dr + c * M * dt, 2) + \
S ** 2 * (tpow(dtheta, 2) + sin(theta) ** 2 * tpow(dphi, 2))
if use_natural_units:
form = constants.subs_natural(form)
return Metric(twoform=form, components=(M, N, L, S))
gii = general_inhomogeneous_isotropic # shorthand for conventional names
def _deriv_simplify_rule(component: Function, variables: Union[Expr, Tuple[Expr, ...]], use_dots: bool = False):
"""Helper function for simplifying derivative notation"""
if not isinstance(variables, tuple): # TODO make this "boxing" more robust
variables = (variables,)
args = component.args
order = len(variables)
key = functools.reduce(D, (component,) + variables)
if any(v not in args for v in variables): # check against simplified
return (key, 0)
if len(args) == 1:
fmt = ('\\' + order * 'd' + 'ot{{{}}}') if use_dots else ('{}' + order * '\'')
return (key, Function(fmt.format(component.name))(*args))
fmt = '{}_' + '{{' + ' '.join([v.name for v in variables]) + '}}'
return (key, Function(fmt.format(component.name))(*args))
def simplify_deriv_notation(expr: Expr, metric: Metric, max_order: int = 2, use_dots: bool = False):
"""Utility for Simplifying LaTeX representation of a sympy Expression via substitutions. Note
that this function simplifies the LaTeX output of the Expr at the cost of the Expr semantics,
only use this after all operations on the Expr (including simplification) have been performed.
Args:
expr:
Expr, the sympy expression
metric:
Metric, the metric containing components whose derivatives will be simplified
max_order:
int, default 2, the max derivative order to replace
use_dots:
bool, default False. If True use dot notation for derivatives of single-variable functions
Returns:
Expr, the modified expression. Recall this is only useful for LaTeX conversion, not semantically valid in sympy.
"""
# Create Simplification Shorthand
components = tuple(sorted(metric.components, key=lambda x: x.name))
variables = metric.coord_system.base_symbols()
rules = []
for n in range(1, max_order + 1):
n_order_rules = [_deriv_simplify_rule(c, vs, use_dots=use_dots) for c, vs in
itertools.product(components, itertools.product(*(n * [variables])))]
rules.extend(n_order_rules)
return expr.subs(dict(rules))
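# Illustrative sketch (assumed usage, not part of the original module):
#   g = flrw()
#   expr = sympy.diff(g.components[0], symbols.t)       # da/dt
#   simplify_deriv_notation(expr, g, use_dots=True)     # renders as a-dot in LaTeX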
| StarcoderdataPython |
3280184 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/text.symbols.ipynb (unless otherwise specified).
__all__ = ['symbols_portuguese', 'PORTUGUESE_SYMBOLS', 'symbols_polish', 'POLISH_SYMBOLS', 'symbols_dutch',
'DUTCH_SYMBOLS', 'symbols_spanish', 'SPANISH_SYMBOLS', 'symbols', 'symbols_nvidia_taco2', 'symbols_with_ipa',
'grad_tts_symbols', 'DEFAULT_SYMBOLS', 'IPA_SYMBOLS', 'NVIDIA_TACO2_SYMBOLS', 'GRAD_TTS_SYMBOLS',
'SYMBOL_SETS', 'symbols_to_sequence', 'arpabet_to_sequence', 'should_keep_symbol', 'symbol_to_id',
'id_to_symbol', 'curly_re', 'words_re']
# Cell
""" from https://github.com/keithito/tacotron """
"""
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. """
from . import cmudict
_pad = "_"
_punctuation_nvidia_taco2 = "!'(),.:;? "
_punctuation = "!'\",.:;? "
_math = "#%&*+-/[]()"
_special = "@©°½—₩€$"
_special_nvidia_taco2 = "-"
_accented = "áçéêëñöøćž"
_numbers = "0123456789"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as
# uppercase letters):
_arpabet = ["@" + s for s in cmudict.valid_symbols]
# Language-specific symbol sets:
_portuguese = "áàãâéèêíìîóòõôúùûçÁÀÃÂÉÈÊÍÌÎÓÒÕÔÚÙÛÇabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
symbols_portuguese = (
[_pad]
+ list(_special_nvidia_taco2)
+ list(_punctuation_nvidia_taco2)
+ list(_portuguese)
+ _arpabet
)
PORTUGUESE_SYMBOLS = "portuguese"
##
_polish = "AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż"
_punctuation_polish = "!,.? "
symbols_polish = (
[_pad]
+ list(_special_nvidia_taco2)
+ list(_punctuation_polish)
+ list(_polish)
+ _arpabet
)
POLISH_SYMBOLS = "polish"
##
_dutch = "éèêëíìîüÉÈÊËÍÌÎÜabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
symbols_dutch = (
[_pad]
+ list(_special_nvidia_taco2)
+ list(_punctuation_nvidia_taco2)
+ list(_dutch)
+ _arpabet
)
DUTCH_SYMBOLS = "dutch"
##
_spanish = "AÁBCDEÉFGHIÍJKLMNÑOÓPQRSTUÚÜVWXYZaábcdeéfghiíjklmnñoópqrstuúüvwxyz"
_punctuation_spanish = "!¡'(),.:;?¿ "
symbols_spanish = (
[_pad]
+ list(_special_nvidia_taco2)
+ list(_punctuation_spanish)
+ list(_spanish)
+ _arpabet
)
SPANISH_SYMBOLS = "spanish"
# Export all symbols:
symbols = (
list(_pad + _punctuation + _math + _special + _accented + _numbers + _letters)
+ _arpabet
)
symbols_nvidia_taco2 = (
[_pad]
+ list(_special_nvidia_taco2)
+ list(_punctuation_nvidia_taco2)
+ list(_letters)
+ _arpabet
)
symbols_with_ipa = symbols + list(_letters_ipa)
grad_tts_symbols = list(_pad + "-" + "!'(),.:;? " + _letters) + _arpabet
DEFAULT_SYMBOLS = "default"
IPA_SYMBOLS = "ipa"
NVIDIA_TACO2_SYMBOLS = "nvidia_taco2"
GRAD_TTS_SYMBOLS = "gradtts"
SYMBOL_SETS = {
DEFAULT_SYMBOLS: symbols,
IPA_SYMBOLS: symbols_with_ipa,
NVIDIA_TACO2_SYMBOLS: symbols_nvidia_taco2,
GRAD_TTS_SYMBOLS: grad_tts_symbols,
PORTUGUESE_SYMBOLS: symbols_portuguese,
POLISH_SYMBOLS: symbols_polish,
DUTCH_SYMBOLS: symbols_dutch,
SPANISH_SYMBOLS: symbols_spanish,
}
# Cell
import re
symbol_to_id = {
DEFAULT_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[DEFAULT_SYMBOLS])},
IPA_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[IPA_SYMBOLS])},
NVIDIA_TACO2_SYMBOLS: {
s: i for i, s in enumerate(SYMBOL_SETS[NVIDIA_TACO2_SYMBOLS])
},
GRAD_TTS_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[GRAD_TTS_SYMBOLS])},
PORTUGUESE_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[PORTUGUESE_SYMBOLS])},
POLISH_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[POLISH_SYMBOLS])},
DUTCH_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[DUTCH_SYMBOLS])},
SPANISH_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[SPANISH_SYMBOLS])},
}
id_to_symbol = {
DEFAULT_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[DEFAULT_SYMBOLS])},
IPA_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[IPA_SYMBOLS])},
NVIDIA_TACO2_SYMBOLS: {
i: s for i, s in enumerate(SYMBOL_SETS[NVIDIA_TACO2_SYMBOLS])
},
GRAD_TTS_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[GRAD_TTS_SYMBOLS])},
PORTUGUESE_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[PORTUGUESE_SYMBOLS])},
POLISH_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[POLISH_SYMBOLS])},
DUTCH_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[DUTCH_SYMBOLS])},
SPANISH_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[SPANISH_SYMBOLS])},
}
curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
words_re = re.compile(
r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]{1,2}|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)"
)
def symbols_to_sequence(symbols, symbol_set=DEFAULT_SYMBOLS, ignore_symbols=["_", "~"]):
return [
symbol_to_id[symbol_set][s]
for s in symbols
if should_keep_symbol(s, symbol_set, ignore_symbols)
]
def arpabet_to_sequence(text, symbol_set=DEFAULT_SYMBOLS):
return symbols_to_sequence(["@" + s for s in text.split()], symbol_set=symbol_set)
def should_keep_symbol(s, symbol_set=DEFAULT_SYMBOLS, ignore_symbols=["_", "~"]):
return s in symbol_to_id[symbol_set] and s not in ignore_symbols | StarcoderdataPython |
94317 | <reponame>Mymoza/pyannote-audio
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
from pyannote.audio.util import mkdir_p
from .callback import Callback
class Checkpoint(Callback):
"""Model checkpoints"""
def __init__(self):
super().__init__()
def on_train_start(self, trainer):
mkdir_p(trainer.log_dir_)
def load_epoch(self, trainer, epoch):
trainer.load_epoch(epoch)
def on_epoch_end(self, trainer):
trainer.save_epoch()
| StarcoderdataPython |
170517 | <filename>utils/util.py
import sys
import os
import logging
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter
class AverageMeter(object):
def __init__(self, name=''):
self._name = name
self.avg = 0.0
self.sum = 0.0
self.cnt = 0.0
def reset(self):
self.avg = 0.0
self.sum = 0.0
self.cnt = 0.0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def __str__(self):
return "%s: %.5f" % (self._name, self.avg)
def get_avg(self):
return self.avg
def __repr__(self):
return self.__str__()
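# Usage sketch (values are illustrative): keep a running, batch-size-weighted
# average of a metric.
#
#   loss_meter = AverageMeter("loss")
#   loss_meter.update(0.75, n=32)
#   loss_meter.update(0.50, n=32)
#   print(loss_meter)  # -> "loss: 0.62500"
#   loss_meter.reset()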
def set_random_seed(seed):
import random
logging.info("Set seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_logger(log_dir=None):
logger = logging.getLogger()
for handler in logger.handlers:
handler.close()
logger.handlers.clear()
log_format = "%(asctime)s | %(message)s"
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format=log_format,
datefmt="%m/%d %I:%M:%S %p")
if log_dir:
os.makedirs(log_dir, exist_ok=True)
file_handler = logging.FileHandler(os.path.join(log_dir, "logger"))
file_handler.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(file_handler)
return logger
def get_writer(title, seed, writer_dir=None):
today = datetime.today()
current_time = today.strftime("%d%m%Y%H%M%S")
writer_dir = os.path.join(
writer_dir,
current_time +
"_{}_{}".format(
title,
seed))
writer = SummaryWriter(log_dir=writer_dir)
return writer
def accuracy(output, target, topk=(1,)):
"""Compute the precision for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
if target.ndimension() > 1:
target = target.max(1)[1]
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(1.0 / batch_size))
return res
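# Usage sketch (shapes are illustrative): `output` is a (batch, num_classes)
# logit tensor and `target` holds integer class labels.
#
#   top1, top5 = accuracy(output, target, topk=(1, 5))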
def resume_checkpoint(
model,
checkpoint_path,
criterion=None,
optimizer=None,
lr_scheduler=None):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
if isinstance(checkpoint, dict) and "model" in checkpoint:
model.load_state_dict(checkpoint["model"])
if criterion is not None and "criterion" in checkpoint:
criterion.load_state_dict(checkpoint["criterion"])
if optimizer is not None and "optimizer" in checkpoint:
optimizer.load_state_dict(checkpoint["optimizer"])
if lr_scheduler is not None and "scheduler" in checkpoint:
lr_scheduler.load_state_dict(checkpoint["scheduler"])
if "epoch" in checkpoint:
resume_epoch = checkpoint["epoch"]
else:
model.load_state_dict(checkpoint)
else:
        raise FileNotFoundError(
            "No checkpoint found at '{}'".format(checkpoint_path))
return resume_epoch
def save(
model,
checkpoint_path,
criterion=None,
optimizer=None,
lr_scheduler=None,
resume_epoch=None):
if optimizer is None and resume_epoch is None and lr_scheduler is None:
checkpoint = model.module.state_dict() if isinstance(
model, nn.DataParallel) else model.state_dict()
else:
checkpoint = {"model": model.module.state_dict() if isinstance(
model, nn.DataParallel) else model.state_dict()}
if criterion is not None:
checkpoint["criterion"] = criterion.state_dict()
if optimizer is not None:
checkpoint["optimizer"] = optimizer.state_dict()
if lr_scheduler is not None:
checkpoint["scheduler"] = lr_scheduler.state_dict()
if resume_epoch is not None:
checkpoint["epoch"] = resume_epoch
torch.save(checkpoint, checkpoint_path)
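# Usage sketch (paths are hypothetical): save the full training state and
# restore it later with resume_checkpoint.
#
#   save(model, "checkpoints/latest.pth", criterion=criterion,
#        optimizer=optimizer, lr_scheduler=scheduler, resume_epoch=epoch)
#   start_epoch = resume_checkpoint(model, "checkpoints/latest.pth",
#                                   criterion=criterion, optimizer=optimizer,
#                                   lr_scheduler=scheduler)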
def min_max_normalize(min_value, max_value, value):
new_value = (value - min_value) / (max_value - min_value)
return new_value
| StarcoderdataPython |
1747227 | <filename>freegames/Code Description/bounce_d.py
"""bounce.py를 한국어로 알기 쉽게 설명하는 파일"""
"""Bounce, a simple animation demo. # Bounce, 간단한 애니메이션 데모
Exercises # 연습문제들
1. Make the ball speed up and down. # 1. 공의 속도를 빠르거나 느리게 만들자
2. Change how the ball bounces when it hits a wall. # 2. 공이 벽이랑 부딪힐때 튀기는 방법을 바꿔보자
3. Make the ball leave a trail. # 3. 공이 이동하면서 자취를 남기도록 해보자
4. Change the ball color based on position. # 4. 위치에 따른 공의 색깔을 바꿔보자
Hint: colormode(255); color(0, 100, 200)
"""
from random import * # random 모듈을 불러온다
from turtle import * # turtle 모듈을 불러온다
from freegames import vector # freegames utils.py에서 선언된 vector를 불러온다
def value(): # (-5, -3) 그리고 (3, 5) 사이에만 있는 랜덤한 수를 생성하는 함수
"Randomly generate value between (-5, -3) or (3, 5)."
return (3 + random() * 2) * choice([1, -1])
ball = vector(0, 0) # 공의 초기 좌표를 (0, 0)으로 설정해준다
aim = vector(value(), value()) # aim의 초기 좌표는 value 함수에서 구한 랜덤 값을 각각 x좌표와 y좌표에 넣어준다
def draw(): # 공을 움직여주고 그 화면을 나타내주는 함수
"Move ball and draw game."
ball.move(aim) # 공을 aim에 설정되있는 좌표만큼 움직여준다
x = ball.x # x와 y를 ball.x와 ball.y 값으로 설정해준다
y = ball.y
if x < -200 or x > 200: # 만약 x가 -200보다 작거나 200보다 크면 aim의 x좌표의 부호를 바꿔준다
aim.x = -aim.x
if y < -200 or y > 200: # 만약 y가 -200보다 작거나 200보다 크면 aim의 x좌표의 부호를 바꿔준다
aim.y = -aim.y
clear() # turtle 모듈의 함수인데 거북이를 그대로 둔 채 화면을 지워준다
goto(x, y) # 거북이를 (x, y)좌표로 이동시켜준다
dot(10) # 거북이가 있는 위치에 반경 10인 원을 그려준다
ontimer(draw, 50) # 50ms 마다 draw 함수가 실행되도록 해준다
setup(420, 420, 370, 0) # 초기 그래픽 설정을 해준다
hideturtle() # turtle 모듈의 거북이를 숨겨준다
tracer(False) # 거북이가 움직이는 자취를 숨겨준다
up() # 펜을 들게 해준다
draw() # 들고 있는 펜으로 화면 위에 그림을 그려준다
done() # turtle 모듈을 종료 시켜준다
| StarcoderdataPython |
3354228 | #!/usr/bin/env python
#
# This code implements the segment mining in Entropy/IP
# @1 finds frequency outliers (like constants, enums, etc.)
# @2 finds highly dense ranges of values (like close /32 prefixes)
# @3 finds uniformly distributed ranges of values (like counters)
# @4 prints what didn't get into @1-@3
#
# Note that not every piece of the code was mentioned in the paper, as the
# description would be too detailed - this is a heuristics algorithm.
#
# Mind the different terminology vs. the paper.
# Runs in python2. Requires numpy and scikit-learn.
#
# Copyright (c) 2015-2016 Akamai Technologies, Inc.
# See file "LICENSE" for licensing information.
# Author: <NAME>
#
import sys
from collections import defaultdict
import argparse
import math
import numpy as np
from sklearn.cluster import DBSCAN
# segment labels
SL = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", \
"O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
# parse arguments ASAP
p = argparse.ArgumentParser(description='Entropy/IP: segment mining')
p.add_argument('ips', help='file with IPv6 addresses in full hex form')
p.add_argument('segments', help='file with segments (output of a1-segments.py)')
p.add_argument('--segment', help='alternative segment definition')
args = p.parse_args()
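# Example invocation (sketch; the script and file names are hypothetical):
#   python <this_script>.py addresses.hex segments.txt
#   python <this_script>.py addresses.hex segments.txt --segment 64-80
# addresses.hex holds one full 32-hex-char IPv6 address per line, segments.txt
# is the output of a1-segments.py, and --segment overrides it with a single
# "start-stop" bit range.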
###################################### helper functions
def read_segment(segment):
d = segment.split('-')
return [{"start":int(d[0]), "stop":int(d[1])}]
def read_segments(src):
segments = []
for line in src:
if line[0:10] != "# segment\t": continue
d = line[0:-1].split('\t')
row = {"start":int(d[2]), "stop":int(d[3])}
segments.append(row)
return segments
def read_ips(src, segments):
db = []
N = 0
for segment in segments:
cl = segment.copy()
cl["vals"] = []
db.append(cl)
for line in src:
if len(line) < 32: continue
line = line[:-1].split()[0].lower()
if len(line) > 32: continue
N += 1
for cl in db:
cl["vals"].append(int(line[cl["start"]/4:cl["stop"]/4], 16))
return db, N
###################################### pretty-printers
def ppcnt(pcnt): return "%7.2f%%" % (pcnt)
def pp(vals, counts, N):
if len(counts) == 0: return False
rv = False
# print starting from top-freq
indexer = counts.argsort()[::-1]
for u,c in zip(vals[indexer], counts[indexer]):
pcnt = 100.0*c/N
if pcnt < 0.005: continue
fmt1 = "%0" + str(L/4) + "x"
fmt2 = " "*(2+32-L/4) + "%" + "s"
print (" " + fmt1 + fmt2) % (u, ppcnt(100.0*c/N))
rv = True
return rv
def rpp(vals, counts, L, N):
if len(counts) == 0: return False
rv = False
# print heavy-hitters
if len(counts) > 4:
q1, q3 = np.percentile(counts, [25, 75])
        iqr = q3 - q1
        T = min(0.1*N, max(q3 + 1.5*iqr, 0.02*N))
hhs = counts > T
rv = pp(vals[hhs], counts[hhs], N)
vals = vals[~hhs]
counts = counts[~hhs]
pcnt = 100.0 * sum(counts)/N
if pcnt < 0.05: return rv
if len(vals) < 5:
rv |= pp(vals, counts, N)
else:
fmt1 = "* %0" + str(L/4) + "x" + "-%0" + str(L/4) + "x"
fmt2 = " "*(1+32-L/4-L/4-1) + " %" + "s"
print (fmt1 + fmt2) % (vals.min(), vals.max(), ppcnt(pcnt))
rv = True
return rv
###################################### custom DBSCAN metrics
def metric(p1,p2):
bdiff = math.fabs(p2[0] - p1[0])
pdiff = math.fabs(math.log(p2[2],13) - math.log(p1[2],13))
return bdiff*0.25 + pdiff*50.0
###################################### main
## prepare: read segments and IPs
if args.segment:
segments = read_segment(args.segment)
else:
segments = read_segments(open(args.segments))
db,N = read_ips(open(args.ips), segments)
## for each segment...
for cn,cl in enumerate(db):
L = cl["stop"] - cl["start"]
P = 1.0/2**L
print "%s: bits %d-%d (hex chars %2d-%2d)" \
% (SL[cn], cl["start"], cl["stop"], cl["start"]/4+1, cl["stop"]/4)
### sample IPs if dataset too large?
if len(cl["vals"]) > 50000:
vals = np.random.choice(cl["vals"], size=50000)
else:
vals = np.asarray(cl["vals"])
N = len(vals)
unique,counts = np.unique(vals, return_counts=True)
### detect frequency top-outliers (@1)
if len(counts) > 10:
q1, q3 = np.percentile(counts, [25, 75])
iqr = q3 - q1
T = min(0.1*N, max(q3 + 1.5*iqr, 1.0*P*N))
hhs = counts > T
nhhs = ~hhs
# too many? use top 10th as *threshold*
if sum(hhs) > 10:
indexer = counts.argsort()[::-1]
t10 = max(2, counts[indexer[9]])
hhs = counts >= t10
nhhs = ~hhs
# still too many? just use the top 10
if sum(hhs) > 10:
hhs = indexer[0:10]
nhhs = indexer[10:]
else: # frequency table very short: take all >0.1%
hhs = counts > max(2, 0.001*N)
nhhs = ~hhs
# divide into outliers vs. non-outliers
hhunique = unique[hhs]
hhcounts = counts[hhs]
unique2 = unique[nhhs]
counts2 = counts[nhhs]
# present (sorted by counts)
pp(hhunique, hhcounts, N)
# anything significant left?
if sum(counts2) < 0.001*N:
continue
elif len(counts2) < 5:
pp(unique2, counts2, N)
continue
### find dense regions (@2)
if L >= 8:
dbscan = DBSCAN(eps=(L/4.0)**3.0, min_samples=5)
regions = dbscan.fit_predict(unique2.reshape(-1,1))
labels = set(regions)
left = sum(counts2)
for label in labels:
rvals = unique2[regions == label]
rcounts = counts2[regions == label]
# anything significant?
if label == -1:
continue
elif sum(rcounts) < 0.001*N:
regions[regions == label] = -1
continue
# find density
observedc = float(sum(rcounts))
expectedc = float(rvals.max() - rvals.min()) / (2**L-1) * left
density = observedc / expectedc
if density < 100.0:
regions[regions == label] = -1
continue
rpp(rvals, rcounts, L, N)
unique3 = unique2[regions == -1]
counts3 = counts2[regions == -1]
else:
unique3 = unique2
counts3 = counts2
### find continuous regions of similar probability (@3)
if L >= 8 and len(counts3) > 1:
bincount = min(256, 2**(cl["stop"]-cl["start"]))
hist,bins = np.histogram(unique3, weights=counts3, bins=bincount)
step = bins[1] - bins[0]
data = np.asarray((range(0,len(hist)), bins[:-1], 1.0*hist/N)).T
data = data[data[:,2] > 0.0]
dbscan = DBSCAN(eps=5.0, min_samples=5, metric=metric)
regions = dbscan.fit_predict(data)
labels = set(regions)
cregions = []
for label in labels:
rbins = data[regions == label,1]
rfreqs = data[regions == label,2]
# unlabeled: just background
if label == -1: continue
# anything significant?
if len(rbins) < 5 or sum(rfreqs) < 0.1:
regions[regions == label] = -1
continue
start = rbins.min()
stop = rbins.max() + step
avg = rfreqs.mean()
cregions.append((start, stop, avg))
# convert clusters to ranges
if len(cregions) > 0:
cregions = np.array(cregions)
cregions = list(cregions[np.argsort(cregions[:,0])])
i = 0
while i+1 < len(cregions):
cur = cregions[i]
nxt = cregions[i+1]
if nxt[0] < cur[1]:
if nxt[2] > cur[2]:
if cur[1] > nxt[1]:
cregions.insert(i+2, np.array([nxt[1], cur[1], cur[2]]))
cur[1] = nxt[0]
else:
nxt[0] = cur[1]
i += 1
# print real ranges + probability
for cregion in cregions:
indexer = (unique3 >= cregion[0]) & (unique3 <= cregion[1])
rvals = unique3[indexer]
rcounts = counts3[indexer]
if rpp(rvals, rcounts, L, N):
unique3 = unique3[~indexer]
counts3 = counts3[~indexer]
### print the rest (@4)
pcnt = 100.0 * counts3.sum() / N
rpp(unique3, counts3, L, N)
| StarcoderdataPython |
26979 | from hamcrest import *
from tests.helpers.sql import sql_query
class TestSqlQueries:
def test_sql_select(self, namespace, index, item):
# Given("Create namespace with item")
db, namespace_name = namespace
item_definition = item
# When ("Execute SQL query SELECT")
query = f'SELECT * FROM {namespace_name}'
item_list = sql_query(namespace, query)
# Then ("Check that selected item is in result")
assert_that(item_list, has_item(equal_to(item_definition)), "Can't SQL select data")
def test_sql_select_with_join(self, namespace, second_namespace_for_join, index, items):
# Given("Create two namespaces")
db, namespace_name = namespace
second_namespace_name, second_ns_item_definition_join = second_namespace_for_join
# When ("Execute SQL query SELECT with JOIN")
query = f'SELECT id FROM {namespace_name} INNER JOIN {second_namespace_name} ON {namespace_name}.id = {second_namespace_name}.id'
item_list = sql_query(namespace, query)
# Then ("Check that selected item is in result")
assert_that(item_list,
has_item(equal_to({'id': 1, f'joined_{second_namespace_name}': [second_ns_item_definition_join]})),
"Can't SQL select data with JOIN")
def test_sql_select_with_condition(self, namespace, index, items):
# Given("Create namespace with item")
db, namespace_name = namespace
# When ("Execute SQL query SELECT")
query = f'SELECT * FROM {namespace_name} WHERE id=3'
item_list = sql_query(namespace, query)
# Then ("Check that selected item is in result")
assert_that(item_list, has_item(equal_to({'id': 3, 'val': 'testval3'})), "Can't SQL select data with condition")
def test_sql_update(self, namespace, index, item):
# Given("Create namespace with item")
db, namespace_name = namespace
# When ("Execute SQL query UPDATE")
query = f"UPDATE {namespace_name} SET \"val\" = 'new_val' WHERE id = 100"
item_list = sql_query(namespace, query)
# Then ("Check that item is updated")
assert_that(item_list, has_item(equal_to({'id': 100, 'val': 'new_val'})), "Can't SQL update data")
def test_sql_delete(self, namespace, index, item):
# Given("Create namespace with item")
db, namespace_name = namespace
# When ("Execute SQL query DELETE")
query_delete = f"DELETE FROM {namespace_name} WHERE id = 100"
sql_query(namespace, query_delete)
# Then ("Check that item is deleted")
query_select = f"SELECT * FROM {namespace_name}"
item_list = sql_query(namespace, query_select)
assert_that(item_list, equal_to([]), "Can't SQL delete data")
def test_sql_select_with_syntax_error(self, namespace, index, item):
# Given("Create namespace with item")
# When ("Execute SQL query SELECT with incorrect syntax")
query = f'SELECT *'
# Then ("Check that selected item is in result")
assert_that(calling(sql_query).with_args(namespace, query),
raises(Exception, matching=has_string(string_contains_in_order(
"Expected", "but found"))), "Error wasn't raised when syntax was incorrect")
| StarcoderdataPython |
4823605 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from utilities.commander import Commander
from versioning.git.command import GitCommand
class GitVersioner:
"""
Class which is for interaction with git and version control.
"""
def __init__(self, project_dir: str):
"""
Initializes git versioner.
Arguments:
project_dir {str} -- Defines project root directory.
"""
self.__commander = Commander(project_dir)
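    # Usage sketch (paths, messages and tag names are hypothetical):
    #
    #   versioner = GitVersioner("/path/to/project")
    #   ok, output = versioner.add_changes()
    #   ok, output = versioner.create_commit("Bump version to 1.2.0")
    #   ok, output = versioner.create_tag("v1.2.0", "Release 1.2.0")
    #   ok, output = versioner.push_changes()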
def add_changes(self) -> (bool, str):
"""
        Stages all changes made since the latest commit.
Returns:
{(bool, str)} -- Returns tuple with flag for success and output message.
"""
process = self.__commander.execute(GitCommand.ADD_CHANGES)
(output, _) = process.communicate()
return (process.returncode == 0, output)
def create_commit(self, message: str) -> (bool, str):
"""
Creates a new commit with provided message.
Returns:
{(bool, str)} -- Returns tuple with flag for success and output message.
"""
process = self.__commander.execute(GitCommand.CREATE_COMMIT.format(message))
(output, _) = process.communicate()
return (process.returncode == 0, output)
def create_tag(self, name: str, message: str=None) -> (bool, str):
"""
        Creates a new tag for the latest commit.
Returns:
{(bool, str)} -- Returns tuple with flag for success and output message.
"""
if message is None:
message = ""
process = self.__commander.execute(GitCommand.CREATE_TAG_WITH_MESSAGE.format(name, message))
(output, _) = process.communicate()
return (process.returncode == 0, output)
def push_changes(self) -> (bool, str):
"""
        Pushes the current changes, including both commits and tags, from the current branch.
Returns:
{(bool, str)} -- Returns tuple with flag for success and output message.
"""
process = self.__commander.execute(GitCommand.PUSH_CHANGES)
(output, _) = process.communicate()
return (process.returncode == 0, output)
| StarcoderdataPython |
180425 | <reponame>google-cloud-sdk-unofficial/google-cloud-sdk<gh_stars>1-10
"""Generated message classes for iamcredentials version v1.
Creates short-lived credentials for impersonating IAM service accounts. To
enable this API, you must enable the IAM API (iam.googleapis.com).
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'iamcredentials'
class GenerateAccessTokenRequest(_messages.Message):
r"""A GenerateAccessTokenRequest object.
Fields:
delegates: The sequence of service accounts in a delegation chain. This
field is required for [delegated
requests](https://cloud.google.com/iam/help/credentials/delegated-
request). For [direct
requests](https://cloud.google.com/iam/help/credentials/direct-request),
which are more common, do not specify this field. Each service account
must be granted the `roles/iam.serviceAccountTokenCreator` role on its
next service account in the chain. The last service account in the chain
must be granted the `roles/iam.serviceAccountTokenCreator` role on the
service account that is specified in the `name` field of the request.
The delegates must have the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
lifetime: The desired lifetime duration of the access token in seconds. By
default, the maximum allowed value is 1 hour. To set a lifetime of up to
12 hours, you can add the service account as an allowed value in an
Organization Policy that enforces the
`constraints/iam.allowServiceAccountCredentialLifetimeExtension`
constraint. See detailed instructions at
https://cloud.google.com/iam/help/credentials/lifetime If a value is not
specified, the token's lifetime will be set to a default value of 1
hour.
scope: Required. Code to identify the scopes to be included in the OAuth
2.0 access token. See
https://developers.google.com/identity/protocols/googlescopes for more
information. At least one value required.
"""
delegates = _messages.StringField(1, repeated=True)
lifetime = _messages.StringField(2)
scope = _messages.StringField(3, repeated=True)
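# Illustrative only (not part of the generated code): message classes accept
# their fields as keyword arguments, e.g.
#
#   _request = GenerateAccessTokenRequest(
#       scope=['https://www.googleapis.com/auth/cloud-platform'],
#       lifetime='3600s')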
class GenerateAccessTokenResponse(_messages.Message):
r"""A GenerateAccessTokenResponse object.
Fields:
accessToken: The OAuth 2.0 access token.
expireTime: Token expiration time. The expiration time is always set.
"""
accessToken = _messages.StringField(1)
expireTime = _messages.StringField(2)
class GenerateIdTokenRequest(_messages.Message):
r"""A GenerateIdTokenRequest object.
Fields:
audience: Required. The audience for the token, such as the API or account
that this token grants access to.
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must have
the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
includeEmail: Include the service account email in the token. If set to
`true`, the token will contain `email` and `email_verified` claims.
"""
audience = _messages.StringField(1)
delegates = _messages.StringField(2, repeated=True)
includeEmail = _messages.BooleanField(3)
class GenerateIdTokenResponse(_messages.Message):
r"""A GenerateIdTokenResponse object.
Fields:
token: The OpenId Connect ID token.
"""
token = _messages.StringField(1)
class IamcredentialsProjectsServiceAccountsGenerateAccessTokenRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsGenerateAccessTokenRequest
object.
Fields:
generateAccessTokenRequest: A GenerateAccessTokenRequest resource to be
passed as the request body.
name: Required. The resource name of the service account for which the
credentials are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
"""
generateAccessTokenRequest = _messages.MessageField('GenerateAccessTokenRequest', 1)
name = _messages.StringField(2, required=True)
class IamcredentialsProjectsServiceAccountsGenerateIdTokenRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsGenerateIdTokenRequest object.
Fields:
generateIdTokenRequest: A GenerateIdTokenRequest resource to be passed as
the request body.
name: Required. The resource name of the service account for which the
credentials are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
"""
generateIdTokenRequest = _messages.MessageField('GenerateIdTokenRequest', 1)
name = _messages.StringField(2, required=True)
class IamcredentialsProjectsServiceAccountsSignBlobRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsSignBlobRequest object.
Fields:
name: Required. The resource name of the service account for which the
credentials are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
signBlobRequest: A SignBlobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signBlobRequest = _messages.MessageField('SignBlobRequest', 2)
class IamcredentialsProjectsServiceAccountsSignJwtRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsSignJwtRequest object.
Fields:
name: Required. The resource name of the service account for which the
credentials are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
signJwtRequest: A SignJwtRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signJwtRequest = _messages.MessageField('SignJwtRequest', 2)
class SignBlobRequest(_messages.Message):
r"""A SignBlobRequest object.
Fields:
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must have
the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
payload: Required. The bytes to sign.
"""
delegates = _messages.StringField(1, repeated=True)
payload = _messages.BytesField(2)
class SignBlobResponse(_messages.Message):
r"""A SignBlobResponse object.
Fields:
keyId: The ID of the key used to sign the blob. The key used for signing
will remain valid for at least 12 hours after the blob is signed. To
verify the signature, you can retrieve the public key in several formats
from the following endpoints: - RSA public key wrapped in an X.509 v3
certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x5
09/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.co
m/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key
(JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACC
OUNT_EMAIL}`
signedBlob: The signature for the blob. Does not include the original
blob. After the key pair referenced by the `key_id` response field
expires, Google no longer exposes the public key that can be used to
verify the blob. As a result, the receiver can no longer verify the
signature.
"""
keyId = _messages.StringField(1)
signedBlob = _messages.BytesField(2)
class SignJwtRequest(_messages.Message):
r"""A SignJwtRequest object.
Fields:
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must have
the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
payload: Required. The JWT payload to sign. Must be a serialized JSON
object that contains a JWT Claims Set. For example: `{"sub":
"<EMAIL>", "iat": 313435}` If the JWT Claims Set contains an
expiration time (`exp`) claim, it must be an integer timestamp that is
not in the past and no more than 12 hours in the future.
"""
delegates = _messages.StringField(1, repeated=True)
payload = _messages.StringField(2)
class SignJwtResponse(_messages.Message):
r"""A SignJwtResponse object.
Fields:
keyId: The ID of the key used to sign the JWT. The key used for signing
will remain valid for at least 12 hours after the JWT is signed. To
verify the signature, you can retrieve the public key in several formats
from the following endpoints: - RSA public key wrapped in an X.509 v3
certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x5
09/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.co
m/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key
(JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACC
OUNT_EMAIL}`
signedJwt: The signed JWT. Contains the automatically generated header;
the client-supplied payload; and the signature, which is generated using
the key referenced by the `kid` field in the header. After the key pair
referenced by the `key_id` response field expires, Google no longer
exposes the public key that can be used to verify the JWT. As a result,
the receiver can no longer verify the signature.
"""
keyId = _messages.StringField(1)
signedJwt = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| StarcoderdataPython |
1683694 | """
KnowYourData
============
A rapid and lightweight module to describe the statistics and structure of
data arrays for interactive use.
The most simple use case to display data is if you have a numpy array 'x':
>>> from knowyourdata import kyd
>>> kyd(x)
"""
import sys
import numpy as np
from IPython.display import display
# Getting HTML Template
from . import kyd_html_display_template
kyd_htmltemplate = kyd_html_display_template.kyd_htmltemplate
class KYD_datasummary(object):
"""A class to store and display the summary information"""
text_repr = ""
html_repr = ""
# Display Settings
col_width = 10
precision = 4
def __repr__(self):
"""
The Plain String Representation of the Data Summary
"""
return self.text_repr
def _repr_html_(self):
"""
The HTML Representation of the Data Summary
"""
return self.html_repr
def make_html_repr(self):
"""Make HTML Representation of Data Summary"""
self.html_repr = kyd_htmltemplate.format(kyd_class=self.kyd_class)
def make_txt_basic_stats(self):
"""Make Text Representation of Basic Statistics"""
pstr_list = []
pstr_struct_header1 = "Basic Statistics "
pstr_struct_header2 = ''
pstr_list.append(pstr_struct_header1)
pstr_list.append(pstr_struct_header2)
template_str = (
" {0:^10} "
" {1:>8} "
" {2:<10} "
" {3:>8} "
" {4:<10} "
)
tmp_data = [
[
"Mean:", "{kyd_class.mean:.{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"",
"Std Dev:", "{kyd_class.std:.{kyd_class.precision}}".format(
kyd_class=self.kyd_class)
],
["Min:", "1Q:", "Median:", "3Q:", "Max:"],
[
"{kyd_class.min: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.firstquartile: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.median: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.thirdquartile: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.max: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
],
['-99 CI:', '-95 CI:', '-68 CI:', '+68 CI:', '+95 CI:', '+99 CI:'],
[
"{kyd_class.ci_99[0]: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.ci_95[0]: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.ci_68[0]: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.ci_68[1]: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.ci_95[1]: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
"{kyd_class.ci_99[1]: .{kyd_class.precision}}".format(
kyd_class=self.kyd_class),
],
]
n_tmp_data = len(tmp_data)
num_rows_in_cols = [len(i) for i in tmp_data]
num_rows = np.max(num_rows_in_cols)
for i in range(n_tmp_data):
tmp_col = tmp_data[i]
for j in range(num_rows_in_cols[i], num_rows):
tmp_col.append("")
for i in range(num_rows):
pstr_list.append(
template_str.format(
tmp_data[0][i],
tmp_data[1][i],
tmp_data[2][i],
tmp_data[3][i],
tmp_data[4][i],
)
)
return pstr_list
def make_txt_struct(self):
"""Make Text Representation of Array"""
pstr_list = []
# pstr_struct_header0 = "................."
# Commenting out Ansi Coloured Version
# pstr_struct_header1 = '\033[1m' + "Array Structure " + '\033[0m'
pstr_struct_header1 = "Array Structure "
pstr_struct_header2 = " "
# pstr_list.append(pstr_struct_header0)
pstr_list.append(pstr_struct_header1)
pstr_list.append(pstr_struct_header2)
pstr_n_dim = (
"Number of Dimensions:\t"
"{kyd_class.ndim}").format(
kyd_class=self.kyd_class)
pstr_list.append(pstr_n_dim)
pstr_shape = (
"Shape of Dimensions:\t"
"{kyd_class.shape}").format(
kyd_class=self.kyd_class)
pstr_list.append(pstr_shape)
pstr_dtype = (
"Array Data Type:\t"
"{kyd_class.dtype}").format(
kyd_class=self.kyd_class)
pstr_list.append(pstr_dtype)
pstr_memsize = (
"Memory Size:\t\t"
"{kyd_class.human_memsize}").format(
kyd_class=self.kyd_class)
pstr_list.append(pstr_memsize)
pstr_spacer = ("")
pstr_list.append(pstr_spacer)
pstr_numnan = (
"Number of NaN:\t"
"{kyd_class.num_nan}").format(
kyd_class=self.kyd_class)
pstr_list.append(pstr_numnan)
pstr_numinf = (
"Number of Inf:\t"
"{kyd_class.num_inf}").format(
kyd_class=self.kyd_class)
pstr_list.append(pstr_numinf)
return pstr_list
def make_text_repr(self):
"""Making final text string for plain text representation"""
tmp_text_repr = ""
tmp_text_repr += "\n"
pstr_basic = self.make_txt_basic_stats()
pstr_struct = self.make_txt_struct()
n_basic = len(pstr_basic)
n_struct = len(pstr_struct)
l_colwidth = max([len(x) for x in pstr_basic]) + 1
r_colwidth = max([len(x) for x in pstr_struct]) + 2
# new_colwidth = self.col_width + 20
# Finding the longest string
len_list = max([n_basic, n_struct])
for i in range(len_list):
tmp_str = '| '
if i < n_basic:
tmp_str += (pstr_basic[i].ljust(l_colwidth))
else:
tmp_str += ''.ljust(l_colwidth)
tmp_str += ' | '
if i < n_struct:
tmp_str += (pstr_struct[i].expandtabs().ljust(r_colwidth))
else:
tmp_str += ''.ljust(r_colwidth)
tmp_str += '\t|'
tmp_text_repr += tmp_str + "\n"
tmp_text_repr += "\n"
self.text_repr = tmp_text_repr
def __init__(self, kyd_class):
super(KYD_datasummary, self).__init__()
self.kyd_class = kyd_class
self.make_text_repr()
self.make_html_repr()
class KYD(object):
"""The Central Class for KYD"""
# Variable for Data Vector
data = None
# Initial Flags
f_allfinite = False
f_allnonfinite = False
f_hasnan = False
f_hasinf = False
# Initialized Numbers
num_nan = 0
num_inf = 0
# Display Settings
col_width = 10
precision = 4
def check_finite(self):
"""Checking to see if all elements are finite and setting flags"""
if np.all(np.isfinite(self.data)):
self.filt_data = self.data
self.f_allfinite = True
else:
finite_inds = np.where(np.isfinite(self.data))
self.filt_data = self.data[finite_inds]
if self.filt_data.size == 0:
self.f_allnonfinite = True
if np.any(np.isnan(self.data)):
self.f_hasnan = True
self.num_nan = np.sum(np.isnan(self.data))
if np.any(np.isinf(self.data)):
self.f_hasinf = True
self.num_inf = np.sum(np.isinf(self.data))
def check_struct(self):
"""Determining the Structure of the Numpy Array"""
self.dtype = self.data.dtype
self.ndim = self.data.ndim
self.shape = self.data.shape
self.size = self.data.size
self.memsize = sys.getsizeof(self.data)
self.human_memsize = sizeof_fmt(self.memsize)
def get_basic_stats(self):
"""Get basic statistics about array"""
if self.f_allnonfinite:
self.min = self.max = self.range = np.nan
self.mean = self.std = self.median = np.nan
self.firstquartile = self.thirdquartile = np.nan
self.ci_68 = self.ci_95 = self.ci_99 = np.array([np.nan, np.nan])
return
self.min = np.float_(np.min(self.filt_data))
self.max = np.float_(np.max(self.filt_data))
self.range = self.max - self.min
self.mean = np.mean(self.filt_data)
self.std = np.std(self.filt_data)
self.median = np.float_(np.median(self.filt_data))
self.firstquartile = np.float_(np.percentile(self.filt_data, 25))
self.thirdquartile = np.float_(np.percentile(self.filt_data, 75))
self.ci_99 = np.float_(
np.percentile(self.filt_data, np.array([0.5, 99.5])))
self.ci_95 = np.float_(
np.percentile(self.filt_data, np.array([2.5, 97.5])))
self.ci_68 = np.float_(
np.percentile(self.filt_data, np.array([16.0, 84.0])))
def make_summary(self):
"""Making Data Summary"""
self.data_summary = KYD_datasummary(self)
def clear_memory(self):
"""Ensuring the Numpy Array does not exist in memory"""
del self.data
del self.filt_data
def display(self, short=False):
"""Displaying all relevant statistics"""
if short:
pass
try:
get_ipython
display(self.data_summary)
except NameError:
print(self.data_summary)
def __init__(self, data):
super(KYD, self).__init__()
# Ensuring that the array is a numpy array
if not isinstance(data, np.ndarray):
data = np.array(data)
self.data = data
self.check_finite()
self.check_struct()
self.get_basic_stats()
self.clear_memory()
self.make_summary()
def sizeof_fmt(num, suffix='B'):
"""Return human readable version of in-memory size.
Code from <NAME> from Stack Overflow:
https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def kyd(data, full_statistics=False):
"""Print statistics of any numpy array
data -- Numpy Array of Data
Keyword arguments:
full_statistics -- printing all detailed statistics of the sources
(Currently Not Implemented)
"""
data_kyd = KYD(data)
if full_statistics:
data_kyd.display()
else:
data_kyd.display(short=True)
return data_kyd
| StarcoderdataPython |
1740004 | <reponame>cfrancisco/dojot
"""
Certificate utilities.
"""
import os
import requests
from src.config import CONFIG
from src.utils import Utils
from src.ejbca.thing import Thing
from src.mqtt_locust.redis_client import RedisClient
LOGGER = Utils.create_logger("cert_utils")
class CertUtils:
"""
Handles certificate-related operations.
"""
@staticmethod
def get_private_key_file(device_id: str) -> str:
"""
Creates the key filename.
"""
Utils.validate_device_id(device_id)
return "{0}.key".format(device_id)
@staticmethod
def get_certificate_file(device_id: str) -> str:
"""
Creates the certificate filename.
"""
Utils.validate_device_id(device_id)
return "{0}.crt".format(device_id)
@staticmethod
def create_cert_files(thing: Thing, directory: str = "/cert/") -> None:
"""Creates the .key and .crt files for a device.
Args:
device_id: device's identification.
thing: Thing object with certificate's info.
directory: directory to save the files.
"""
key_path = directory + CertUtils.get_private_key_file(thing.device_id)
cert_path = directory + CertUtils.get_certificate_file(thing.device_id)
try:
if os.path.isfile(key_path):
os.remove(key_path)
with open(key_path, "w") as key_file:
key_file.write(str(thing.private_key))
if os.path.isfile(cert_path):
os.remove(cert_path)
with open(cert_path, "w") as key_file:
key_file.write(str(thing.thing_certificate))
except Exception as exception:
LOGGER.error("Error: %s", str(exception))
raise
@staticmethod
def new_cert(tenant: str, device_id: str) -> Thing:
"""
        Creates/renews the certificate for a device.
"""
Utils.validate_tenant(tenant)
Utils.validate_device_id(device_id)
thing = Thing(tenant, device_id)
return thing
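    # Usage sketch (tenant and device id are hypothetical): issue a certificate
    # for a device, then write the key/cert pair to disk.
    #
    #   thing = CertUtils.new_cert("admin", "abc123")
    #   CertUtils.create_cert_files(thing, directory="/cert/")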
@staticmethod
def revoke_cert(thing: Thing) -> None:
"""
Revokes a certificate for a specific device.
"""
thing.cert.revoke_cert()
@staticmethod
def has_been_revoked(thing: Thing) -> bool:
"""
Verifies whether the certificate has been revoked or not.
"""
url = CONFIG["dojot"]["url"]+ "/x509/v1/certificates/" + thing.cert.crt['fingerprint']
res = requests.get(
url=url,
headers={
"Authorization": "Bearer {0}".format(RedisClient().get_jwt())
}
)
return res.status_code == 404
| StarcoderdataPython |
3374241 | <gh_stars>10-100
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python import ipu
def _get_model(compile_metrics):
model_layers = [
layers.Dense(3, activation='relu', kernel_initializer='ones'),
layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(loss='mae',
metrics=compile_metrics,
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
class ResetStatesTest(keras_parameterized.TestCase):
def setUp(self):
super(ResetStatesTest, self).setUp()
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.ipu_model.compile_ipu_code = False
cfg.ipu_model.tiles_per_ipu = 1
cfg.configure_ipu_system()
self._ipu_strategy = ipu.ipu_strategy.IPUStrategyV1()
self._ipu_strategy_scope = self._ipu_strategy.scope()
self._ipu_strategy_scope.__enter__()
def tearDown(self):
self._ipu_strategy_scope.__exit__(None, None, None)
super(ResetStatesTest, self).tearDown()
def test_reset_states_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((128, 4))
y = np.zeros((128, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 128.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 128.)
def test_reset_states_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((128, 4))
y = np.ones((128, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 128.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 128.)
def test_reset_states_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((128, 4))
y = np.zeros((128, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 128.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 128.)
def test_reset_states_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((128, 4))
y = np.ones((128, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 128.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 128.)
def test_reset_states_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((64, 4)), np.ones((64, 4))))
y = np.concatenate((np.ones((64, 1)), np.zeros((64, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 64.)
self.assertEqual(self.evaluate(p_obj.false_positives), 64.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 64.)
self.assertEqual(self.evaluate(p_obj.false_positives), 64.)
def test_reset_states_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((64, 4)), np.zeros((64, 4))))
y = np.concatenate((np.ones((64, 1)), np.ones((64, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 64.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 64.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 64.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 64.)
def test_reset_states_auc(self):
auc_obj = metrics.AUC(num_thresholds=3)
model = _get_model([auc_obj])
x = np.concatenate((np.ones((32, 4)), np.zeros((32, 4)), np.zeros(
(32, 4)), np.ones((32, 4))))
y = np.concatenate((np.ones((32, 1)), np.zeros((32, 1)), np.ones(
(32, 1)), np.zeros((32, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 32.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 32.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 32.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 32.)
def test_reset_states_auc_manual_thresholds(self):
auc_obj = metrics.AUC(thresholds=[0.5])
model = _get_model([auc_obj])
x = np.concatenate((np.ones((32, 4)), np.zeros((32, 4)), np.zeros(
(32, 4)), np.ones((32, 4))))
y = np.concatenate((np.ones((32, 1)), np.zeros((32, 1)), np.ones(
(32, 1)), np.zeros((32, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 32.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 32.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 32.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 32.)
if __name__ == '__main__':
test.main()
| StarcoderdataPython |
4832377 | <gh_stars>0
from kubernetes import client, config, watch
class ClusterApi(object):
def __init__(self, namespace):
config.load_kube_config()
self.core = client.CoreV1Api()
self.batch = client.BatchV1Api()
self.namespace = namespace
def create_persistent_volume_claim(self, name, storage_size_in_g,
access_modes=["ReadWriteMany"],
storage_class_name="glusterfs-storage"):
pvc = client.V1PersistentVolumeClaim()
pvc.metadata = client.V1ObjectMeta(name=name)
storage_size = "{}Gi".format(storage_size_in_g)
resources = client.V1ResourceRequirements(requests={"storage": storage_size})
pvc.spec = client.V1PersistentVolumeClaimSpec(access_modes=access_modes,
resources=resources,
storage_class_name=storage_class_name)
return self.core.create_namespaced_persistent_volume_claim(self.namespace, pvc)
def delete_persistent_volume_claim(self, name):
self.core.delete_namespaced_persistent_volume_claim(name, self.namespace, client.V1DeleteOptions())
def create_secret(self, name, string_value_dict):
body = client.V1Secret(string_data=string_value_dict, metadata={'name': name})
return self.core.create_namespaced_secret(namespace=self.namespace, body=body)
def delete_secret(self, name):
self.core.delete_namespaced_secret(name, self.namespace, body=client.V1DeleteOptions())
def create_job(self, name, batch_job_spec):
body = client.V1Job(
metadata=client.V1ObjectMeta(name=name),
spec=batch_job_spec.create())
return self.batch.create_namespaced_job(self.namespace, body)
def wait_for_jobs(self, job_names):
waiting_for_job_names = set(job_names)
failed_job_names = []
w = watch.Watch()
for event in w.stream(self.batch.list_namespaced_job, self.namespace):
job = event['object']
            job_name = job.metadata.name
            # Ignore events for jobs we are not waiting on, including repeated
            # events for jobs that have already been accounted for.
            if job_name not in waiting_for_job_names:
                continue
            if job.status.succeeded:
waiting_for_job_names.remove(job_name)
elif job.status.failed:
waiting_for_job_names.remove(job_name)
failed_job_names.append(job_name)
if not waiting_for_job_names:
w.stop()
if failed_job_names:
raise ValueError("Failed jobs: {}".format(','.join(failed_job_names)))
else:
print("Jobs complete: {}".format(','.join(job_names)))
def delete_job(self, name, propagation_policy='Background'):
body = client.V1DeleteOptions(propagation_policy=propagation_policy)
self.batch.delete_namespaced_job(name, self.namespace, body=body)
def create_config_map(self, name, data):
body = client.V1ConfigMap(
metadata=client.V1ObjectMeta(name=name),
data=data
)
return self.core.create_namespaced_config_map(self.namespace, body)
def delete_config_map(self, name):
self.core.delete_namespaced_config_map(name, self.namespace, body=client.V1DeleteOptions())
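# Usage sketch (image, names and sizes are hypothetical): run a one-off job by
# combining ClusterApi with the Container and BatchJobSpec helpers below.
#
#   api = ClusterApi(namespace="my-namespace")
#   container = Container(
#       name="worker", image_name="busybox", command="sh",
#       args=["-c", "echo hello"], working_dir="/tmp", env_dict={"MODE": "demo"},
#       requested_cpu="100m", requested_memory="64Mi", volumes=[])
#   api.create_job("demo-job", BatchJobSpec("demo-job", container))
#   api.wait_for_jobs(["demo-job"])
#   api.delete_job("demo-job")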
class Container(object):
def __init__(self, name, image_name, command, args, working_dir, env_dict,
requested_cpu, requested_memory, volumes):
self.name = name
self.image_name = image_name
self.command = command
self.args = args
self.working_dir = working_dir
self.env_dict = env_dict
self.requested_cpu = requested_cpu
self.requested_memory = requested_memory
self.volumes = volumes
def create_env(self):
environment_variables = []
for key, value in self.env_dict.items():
environment_variables.append(client.V1EnvVar(name=key, value=value))
return environment_variables
def create_volume_mounts(self):
return [volume.create_volume_mount() for volume in self.volumes]
def create_volumes(self):
return [volume.create_volume() for volume in self.volumes]
def create_resource_requirements(self):
return client.V1ResourceRequirements(
requests={
"memory": self.requested_memory,
"cpu": self.requested_cpu
})
def create(self):
return client.V1Container(
name=self.name,
image=self.image_name,
working_dir=self.working_dir,
command=[self.command],
args=self.args,
resources=self.create_resource_requirements(),
env=self.create_env(),
volume_mounts=self.create_volume_mounts()
)
class VolumeBase(object):
def __init__(self, name, mount_path):
self.name = name
self.mount_path = mount_path
def create_volume_mount(self):
return client.V1VolumeMount(
name=self.name,
mount_path=self.mount_path)
class SecretVolume(VolumeBase):
def __init__(self, name, mount_path, secret_name):
super(SecretVolume, self).__init__(name, mount_path)
self.secret_name = secret_name
def create_volume(self):
return client.V1Volume(
name=self.name,
secret=self.create_secret())
def create_secret(self):
return client.V1SecretVolumeSource(secret_name=self.secret_name)
class PersistentClaimVolume(VolumeBase):
def __init__(self, name, mount_path, volume_claim_name):
super(PersistentClaimVolume, self).__init__(name, mount_path)
self.volume_claim_name = volume_claim_name
def create_volume(self):
return client.V1Volume(
name=self.name,
persistent_volume_claim=self.create_volume_source())
def create_volume_source(self):
return client.V1PersistentVolumeClaimVolumeSource(claim_name=self.volume_claim_name)
class ConfigMapVolume(VolumeBase):
def __init__(self, name, mount_path, config_map_name, source_key, source_path):
super(ConfigMapVolume, self).__init__(name, mount_path)
self.config_map_name = config_map_name
self.source_key = source_key
self.source_path = source_path
def create_volume(self):
return client.V1Volume(
name=self.name,
config_map=self.create_config_map())
def create_config_map(self):
items = [client.V1KeyToPath(key=self.source_key, path=self.source_path)]
return client.V1ConfigMapVolumeSource(name=self.config_map_name,
items=items)
class BatchJobSpec(object):
def __init__(self, name, container):
self.name = name
self.pod_restart_policy = "Never"
self.container = container
def create(self):
job_spec_name = "{}spec".format(self.name)
return client.V1JobSpec(
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(name=job_spec_name),
spec=self.create_pod_spec()
)
)
def create_pod_spec(self):
return client.V1PodSpec(
containers=self.create_containers(),
volumes=self.create_volumes(),
restart_policy="Never"
)
def create_containers(self):
container = self.container.create()
return [container]
def create_volumes(self):
return self.container.create_volumes()
| StarcoderdataPython |
3393185 | <reponame>skitazaki/python-school-ja<filename>src/cmdline-3.py<gh_stars>0
from pprint import pprint
import settings
pprint(dir(settings))
pprint({'DEBUG': settings.DEBUG})
pprint(settings.DATABASES['default'])
| StarcoderdataPython |