id | text | dataset_id
---|---|---|
77854 | <filename>bazel/macros/http_toolchain.bzl
load("//bazel/macros:toolchains.bzl", "parse_toolchain_file")
def _archive_rule(provider):
additional = ""
if provider.archive_opts != None:
additional = "\n strip_prefix = \"%s\"," % (provider.archive_opts)
return """
http_archive(
name = "{name}",
url = "{url}",
sha256 = "{sha256}",
build_file_content = OPEN_FILE_ARCHIVE, {kwargs}
)""".format(
name = provider.archive,
url = provider.urls[0],
sha256 = provider.sha256,
kwargs = additional,
)
def _toolchain_rules(provider):
return """toolchain(
name = "{name}",
exec_compatible_with = {exec_compatible_with},
target_compatible_with = {target_compatible_with},
toolchain = ":{info}",
toolchain_type = ":toolchain_type",
)
externally_managed_toolchain(
name = "{info}",
tool = "{tool}",
)
""".format(
name = provider.toolchain,
exec_compatible_with = provider.exec_compatible_with,
target_compatible_with = provider.target_compatible_with,
info = "{}info".format(provider.toolchain),
tool = provider.tool,
)
def _register_external_toolchain_impl(repository_ctx):
toolchain_path = repository_ctx.path(repository_ctx.attr.toolchain)
tool = parse_toolchain_file(repository_ctx, toolchain_path)
providers = tool.toolchains
toolchain_rules = []
tool_archive_rules = []
for provider in providers:
toolchain_rule = _toolchain_rules(provider)
toolchain_rules.append(toolchain_rule)
tool_archive_rule = _archive_rule(provider)
tool_archive_rules.append(tool_archive_rule)
repository_ctx.file(
"BUILD.bazel",
"""load("@bazel-external-toolchain-rules//bazel/macros:http_toolchain.bzl", "externally_managed_toolchain")
package(default_visibility = ["//visibility:public"])
toolchain_type(name = "toolchain_type")
{rules}
""".format(rules = "\n".join(toolchain_rules)),
)
repository_ctx.file(
"deps.bzl",
"""load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
OPEN_FILE_ARCHIVE = \"\"\"
package(default_visibility = ["//visibility:public"])
filegroup(
name = "files",
srcs = glob(["*","**/*"]),
)
\"\"\"
def install_toolchain():
native.register_toolchains(
{toolchains}
)
{rules}
""".format(
rules = "\n".join(tool_archive_rules),
toolchains = ",\n ".join([
'"@{}//:{}"'.format(repository_ctx.name, toolchain.toolchain)
for toolchain in providers
]),
),
)
register_external_toolchain = repository_rule(
_register_external_toolchain_impl,
attrs = {
"toolchain": attr.label(
mandatory = True,
allow_single_file = True,
),
},
)
ExternallyManagedToolExecutableInfo = provider(
doc = "Externally managed toolchain through use of file.",
fields = {"tool": ""},
)
def _externally_managed_toolchain_impl(ctx):
toolchain_info = platform_common.ToolchainInfo(
toolinfo = ExternallyManagedToolExecutableInfo(
tool = ctx.file.tool,
),
)
return [toolchain_info]
externally_managed_toolchain = rule(
implementation = _externally_managed_toolchain_impl,
attrs = {
"tool": attr.label(
executable = True,
allow_single_file = True,
mandatory = True,
cfg = "host",
),
},
)
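# --- Hedged usage sketch (not part of the original file) ---
# A minimal illustration of how this repository rule might be wired into a
# WORKSPACE. The repository name and the toolchain file label are assumptions
# for illustration; only the load paths and function names come from this file.
#
# load("@bazel-external-toolchain-rules//bazel/macros:http_toolchain.bzl", "register_external_toolchain")
#
# register_external_toolchain(
#     name = "my_tool_toolchains",                   # hypothetical repository name
#     toolchain = "//toolchains:my_tool.toolchain",  # hypothetical toolchain file
# )
#
# # The generated repository exposes deps.bzl, whose install_toolchain() registers
# # the toolchains and declares the http_archive repositories for the tool binaries:
# load("@my_tool_toolchains//:deps.bzl", "install_toolchain")
# install_toolchain()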
| StarcoderdataPython |
41621 | # <NAME>
# 2017A7PS0112P
from gui import Gui
Gui().loop()
| StarcoderdataPython |
4826013 | <reponame>pasmuss/cmssw
import FWCore.ParameterSet.Config as cms
rpcFEDIntegrity = cms.EDAnalyzer("RPCFEDIntegrity",
RPCPrefixDir = cms.untracked.string('RPC/FEDIntegrity'),
RPCRawCountsInputTag = cms.untracked.InputTag('muonRPCDigis'),
NumberOfFED = cms.untracked.int32(3)
)
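# --- Hedged usage sketch (not part of the original fragment) ---
# How a fragment like this is typically scheduled from a cmsRun configuration
# after importing it; the process name and path name are illustrative assumptions.
#
# process = cms.Process("RPCDQM")
# process.rpcFEDIntegrity = rpcFEDIntegrity
# process.rpcFEDIntegrityPath = cms.Path(process.rpcFEDIntegrity)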
| StarcoderdataPython |
6116 | <reponame>CzechInvest/ciis
from django.contrib import admin
from django.contrib.gis import geos
from leaflet.admin import LeafletGeoAdmin, LeafletGeoAdminMixin
from .models import Lau1
from .models import Nuts3
from .models import Airport
from .models import Road
from .models import PublicTransportStop
from .models import RailwayStation
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
import nested_admin
import uuid
import json
class AirportAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class RoadAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class RailwayStationAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class PublicTransportStopAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class LAU1Admin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class NUTS3Admin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class NUTS3AdminInline(LeafletGeoAdminMixin, admin.StackedInline):
model = Nuts3
class LAU1AdminInline(LeafletGeoAdminMixin, admin.StackedInline):
model = Lau1
class NUTS3Filter(admin.SimpleListFilter):
"""Filter for admin interface of NUTS3 regions (Kraje)
"""
title = _('NUTS3 regions')
parameter_name = 'nuts3#'
def lookups(self, request, model_admin):
nuts3 = Nuts3.objects.all()
return (
(obj.id, obj.name) for obj in nuts3
)
def queryset(self, request, queryset):
val = self.value()
if val:
nuts3 = Nuts3.objects.get(pk=val)
results = queryset.filter(
location__geometry__intersects=nuts3.geometry)
else:
results = queryset
return results
class ArealFieldAdmin(nested_admin.NestedModelAdmin):
geojson_attributes = []
def get_place(self, obj):
if hasattr(obj.location, "address") and \
obj.location.address is not None:
return obj.location.address.city
else:
return ", ".join(
[l.__str__() for l in Nuts3.objects.filter(
geometry__intersects=obj.location.geometry)])
def get_search_results(self, request, queryset, search_term):
"""Add NUTS3 (by name) search and area size search (using `<>` operator)
"""
result, use_distinct = super(
ArealFieldAdmin, self).get_search_results(
request, queryset, search_term)
if search_term:
if len(result) == 0 or len(result) == len(queryset):
result = self._search_lay1_nuts3_by_name(
queryset, search_term)
if len(result) == 0 or len(result) == len(queryset):
result = self._search_area(queryset, search_term)
return (result, use_distinct)
def _search_lay1_nuts3_by_name(self, queryset, search_term):
"""Search NUTS3 (kraje) and LAU1 (okresy) region according to name
"""
filtered = queryset.none()
for cls in (Lau1, Nuts3):
objs = cls.objects.filter(name__startswith=search_term)
for o in objs:
objects = queryset.filter(
location__geometry__intersects=o.geometry)
filtered |= objects
return filtered
def _search_area(self, queryset, search_term):
"""Search all features, where MIN < area.total < MAX
"""
filtered = queryset.none()
if search_term.find("<>") > -1:
area_min, area_max = [float(x) for x in search_term.split("<>")]
filtered = queryset.filter(
areal__area__total__gte=area_min,
areal__area__total__lte=area_max)
return filtered
def changelist_view(self, request, extra_context=None):
"""Adjust change list view
add GeoJSON encoded data for the queryset
"""
extra_context = extra_context or {}
response = super().changelist_view(
request, extra_context=extra_context,
)
if hasattr(response, "context_data"):
filtered_query_set = response.context_data["cl"].queryset
extra_context['objects_data'] = \
json.dumps(self.as_geojson(filtered_query_set))
response.context_data.update(extra_context)
return response
def as_geojson(self, queryset):
if self.geojson_attributes:
attributes = self.geojson_attributes
else:
attributes = []
data = {
"type": "FeatureCollection",
"features": []
}
for obj in queryset:
geom = None
if hasattr(obj, "location_set"):
multipoint = geos.MultiPoint(
[loc.address.coordinates for loc in obj.location_set.all()])
geom = multipoint.centroid
elif hasattr(obj, "location"):
geom = obj.location.geometry.centroid
elif hasattr(obj, "geom"):
geom = obj.geom
elif hasattr(obj, "address"):
geom = obj.address.coordinates
if geom:
title = None
if hasattr(obj, "title"):
title = obj.title
elif hasattr(obj, "name"):
title = obj.name
if type(obj.pk) == uuid.UUID:
id = str(obj.pk)
else:
id = obj.pk
feature = {
"type": "Feature",
"properties": {
"name": title,
"object_url":
reverse('admin:{}_{}_change'.format(
obj._meta.app_label,
obj._meta.model_name), args=(obj.pk,)),
},
"geometry": json.loads(geom.json),
"id": id
}
for attribute in attributes:
if hasattr(obj, attribute):
value = getattr(obj, attribute.__str__())
if type(value) == uuid.UUID:
feature[attribute] = str(value)
else:
feature[attribute] = value
#print(feature)
data["features"].append(feature)
return data
# Register your models here.
admin.site.register(Lau1, LAU1Admin)
admin.site.register(Nuts3, NUTS3Admin)
admin.site.register(Road, RoadAdmin)
admin.site.register(PublicTransportStop, PublicTransportStopAdmin)
admin.site.register(RailwayStation, RailwayStationAdmin)
admin.site.register(Airport, AirportAdmin)
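# --- Hedged usage sketch (not part of the original file) ---
# How a concrete model admin might build on ArealFieldAdmin; "Project" and the
# attribute values below are hypothetical, not models or fields from this app.
#
# class ProjectAdmin(ArealFieldAdmin):
#     geojson_attributes = ["status"]   # extra attributes copied into each GeoJSON feature
#     list_filter = (NUTS3Filter,)      # filter the change list by NUTS3 region
#     search_fields = ("title",)        # enables the name and "MIN<>MAX" area searches above
#
# admin.site.register(Project, ProjectAdmin)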
| StarcoderdataPython |
3333398 | <reponame>ilg-ul/flickr-get-stats
"""
Usage:
python ilg.flickr.collections.get.wp [options]
Options:
-i "FILE", --input="FILE"
input pickle file
--input=
default '$HOME/Documents/Flickr/Collections.pickle'
--wp_url="URL"
URL of WordPress XMLRPC endpoint
--wp_user="user"
WordPress user name
--wp_passwd="<PASSWORD>"
WordPress user password
--wp_key="key"
WordPress blog key (from Users -> Personal Settings -> API Key)
--wp_map="Flickr_id:WordPress_id:comment" (list)
Map Flickr collection id to WordPress page id.
Index page entered as 'index'.
The comment can be used to store the collection title.
--wp_out="WordPress_id" (list)
WordPress page to be generated
-v, --verbose
print progress output
-V, --version
print program version
-h, --help
print this message
Purpose:
Iterate all collections from a Flickr account, generate the
html pages and publish them on WordPress.
If the input file is specified, the program will use the local serialised
file instead of Flickr (useful for testing).
---
This program will generate WordPress pages with a Table of Contents and
a detailed tree of collections and albums.
The global index page will contain all collections in the Flickr account,
but will not include albums.
The authentication is done only once, and the Flickr authentication
token is stored in the user home directory.
"""
import getopt
import os
import sys
import flickrapi
import time
import io
import pickle
from pyblog import WordPress
from ilg.flickr.application.api import API
from ilg.flickr.collections.get.html.writer import Writer
from ilg.flickr.collections.get.aggregate import Aggregate
from ilg.flickr.collections.get.tree import Tree
from ilg.flickr.collections.get.html.tocwriter import TOCWriter
# ---------------------------------------------------------------------
def usage():
print __doc__
def main(*argv):
oApp = API()
try:
opts, args = getopt.getopt(argv[1:], 'hi:vV', ['help', 'input=', 'wp_url=', 'wp_user=', 'wp_passwd=', 'wp_key=', 'wp_map=', 'wp_out=', 'verbose', 'version'])
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
return 2
if len(args) > 0:
print 'unused arguments: ', args
usage()
return 2
sArgInput = None
bArgVerbose = False
sWpUrl = None
sWpUser = None
sWpPasswd = None
sWpKey = None
dWpMapByCollectionId = {} # mapping dictionary; key=collection_id; value=WP_id
dWpMapByPageId = {} # mapping dictionary; key=WP_id; value=collection_id
aWpOut = [] # list of WP_ids to generate
for o, a in opts:
if o in ('-v', '--verbose'):
bArgVerbose = True
elif o in ('-h', '--help'):
usage()
return 0
elif o in ('-V', '--version'):
print "version: " + oApp.getProgramVersion()
return 0
elif o in ('-i', '--input'):
if a != None and a != '':
sArgInput = a
else:
sArgInput = oApp.getUserHome()
sArgInput += '/Documents/Flickr/Collections.pickle'
elif o in ('--wp_url'):
sWpUrl = a
elif o in ('--wp_user'):
sWpUser = a
elif o in ('--wp_passwd'):
sWpPasswd = a
elif o in ('--wp_key'):
sWpKey = a
elif o in ('--wp_map'):
aSub = a.split(':')
if len(aSub) >= 2:
sCollectionId = aSub[0]
iWpPageId = int(aSub[1])
dWpMapByCollectionId[sCollectionId] = iWpPageId
dWpMapByPageId[iWpPageId] = sCollectionId
else:
print 'option "%s=%s" ignored' % (o, a)
elif o in ('--wp_out'):
# non numeric IDs are ignored
if a.isdigit():
aWpOut.append(int(a))
else:
print 'option "%s=%s" ignored' % (o, a)
else:
assert False, 'option not handled'
if sWpUrl == None or sWpUser == None or sWpPasswd == None or sWpKey == None or len(dWpMapByCollectionId) == 0 or len(aWpOut) == 0:
usage()
return 2
if sArgInput == None:
oInStream = None
else:
oInStream = open(sArgInput, 'rb')
if bArgVerbose:
print sArgInput
nBeginSecs = time.time()
# create worker objects
oMainWriter = Writer()
oTOCWriter = TOCWriter()
oWriters = [ oTOCWriter ]
oRoot = None
nMainRet = 0
try:
dRemapUrl = {}
for sCollectionId in dWpMapByCollectionId.keys():
if sCollectionId.isdigit():
dRemapUrl[sCollectionId] = '/?p=%d' % dWpMapByCollectionId[sCollectionId]
oBlog = WordPress(sWpUrl, sWpUser, sWpPasswd)
if False:
aPageList = oBlog.get_page_list(sWpKey)
dBlogPagesById = {}
for dPage in aPageList:
print dPage
dBlogPagesById[int(dPage['page_id'])] = dPage
if oInStream == None:
flickr = oApp.authenticate()
oTree = Tree(flickr, bArgVerbose)
oRoot = oTree.build()
else:
oTree = pickle.load(oInStream)
oRoot = oTree.getRoot()
oAgregate = Aggregate(oTree, oMainWriter, oWriters, bArgVerbose)
iWpPages = 0
for nPageId in aWpOut:
sCollectionId = None
# find the corresponding Flickr collection id
if nPageId in dWpMapByPageId:
sCollectionId = dWpMapByPageId[nPageId]
else:
print 'Page %d not found in --wp_map' % nPageId
continue
bOutputSets = True
if sCollectionId == '*' or sCollectionId == 'index':
oNode = oTree.getRoot()
bOutputSets = False
else:
oNode = oTree.findCollection(sCollectionId)
if oNode == None:
print 'Collection %s not found in Flickr' % sCollectionId
continue
oOutStream = io.StringIO()
oAgregate.runSingleOutput(oNode, bOutputSets, dRemapUrl, oOutStream)
sContent = oOutStream.getvalue()
oOutStream.close()
dPage = oBlog.get_page(nPageId, sWpKey)
#print dPage['description']
sTitle = dPage['title'] # mandatory
sSlug = dPage['wp_slug'] # mandatory
sDescription = dPage['description']
#print sDescription
print 'Page %d "%s" read in' % (nPageId, sTitle)
sDescription = sContent
dContent = {}
dContent['description'] = sDescription
dContent['title'] = sTitle
dContent['wp_slug'] = sSlug
oRet = oBlog.edit_page(nPageId, dContent, True, sWpKey)
if oRet:
print 'Page %d "%s" published' % (nPageId, sTitle)
iWpPages += 1
else:
print 'Page %d not published' % nPageId
# end of for nPageId
except flickrapi.exceptions.FlickrError as ex:
print 'FlickrError', ex
if bArgVerbose:
nEndTime = time.time()
nDuration = nEndTime-nBeginSecs
if oRoot != None:
print '[done, %d collection(s), %d set(s), %d photos, %d pages, %d sec]' % (oRoot.nCollections, oRoot.nSets, oRoot.nPhotos, iWpPages, nDuration)
else:
print '[done, %d sec]' % (nDuration)
return nMainRet
if __name__ == '__main__':
sys.exit(main(*sys.argv))
| StarcoderdataPython |
3236457 | # -*- coding: utf-8 -*-
"""
This module implements interfacing tools for the FISTA algorithm.
For further informations about the FISTA algorithm, have a look at [1]_.
.. [1] BECK, <NAME>, Marc. A fast iterative shrinkage-thresholding
algorithm for linear inverse problems. SIAM journal on imaging sciences,
2009, vol. 2, no 1, p. 183-202.
"""
import time
import math
import logging
import numpy as np
_logger = logging.getLogger(__name__)
class FISTA:
"""
Fast Iterative Shrinkage-Thresholding Algorithm implementation.
Attributes
----------
f: function
:math:`C^{1,1}` convex function.
df: function
derivative function of f.
L: float
Lipschitz constant of f.
g: function
Non-smooth function.
pg: function
proximal operator of g.
shape: tuple
The data shape.
Nit: None, int
Number of iteration.
If None, the iterations will stop as soon as the functional
no longer evolve.
Default is None.
init: numpy array
Initial point whose shape is the same as the data.
If None, a random initialization is drawn.
Default is None.
verbose: bool
If True, progress information is sent to the output.
Default is True.
Nit_max: int
Maximum number of iterations.
tau: float
Descent step.
E: numpy array
Functional evolution across iterations.
lim: float
Controls the stop condition when Nit is None.
The smaller lim is, the more iterations run before stopping.
lim is usually 1e-4.
"""
def __init__(
self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True):
"""Initialization function for FISTA.
Arguments
---------
f: function
:math:`C^{1,1}` convex function.
df: function
derivative function of f.
L: float
Lipschitz constant of f.
g: function
Non-smooth function.
pg: function
proximal operator of g.
shape: tuple
The data shape.
Nit: None, int
Number of iterations.
If None, the iterations will stop as soon as the functional
no longer evolves.
Default is None.
init: numpy array
Initial point whose shape is the same as the data.
If None, a random initialization is drawn.
Default is None.
verbose: bool
If True, progress information is sent to the output.
Default is True.
"""
# Check input
if L <= 0:
raise ValueError('Input L parameter should be strictly positive.')
if Nit is not None:
if Nit <= 0:
raise ValueError('Input number of iteration is non-positive.')
if Nit > 1e6:
raise ValueError('Input number of iterations is really high.')
if init is not None:
if init.shape != shape:
raise ValueError(
'Input init shape and shape parameter do not match.')
else:
np.random.seed(1)
init = np.random.randn(*shape)
if not isinstance(verbose, bool):
raise ValueError('Input verbose parameter is not boolean.')
# Save attributes for methods
_logger.info('Setting up new FISTA optimizer.')
self.f = f
self.df = df
self.g = g
self.pg = pg
self.L = L
self.shape = shape
self.Nit = Nit
self.init = init
self.verbose = verbose
# Parameters
# Max number of iterations.
self.Nit_max = 1000 if self.Nit is None else self.Nit
# Step.
self.tau = 0.99 / self.L
# Functional values across iterations.
self.E = np.zeros(self.Nit_max)
# Tunes stop condition when Nit is None.
self.lim = 1e-4
def StopCritera(self, n):
"""This function computes a critera that informs about the algorithm
convergence at step n.
Arguments
---------
n: int
Current step
Returns
-------
float
Value of the criterion.
"""
if np.allclose(self.E[n - 2], 0):
return None
else:
return np.abs(self.E[n-1] - self.E[n - 2])/(
self.E[n - 2] * self.tau)
def StopTest(self, n):
"""This function choose if iterations should be stopped at step n.
If Nit is not None, it returns True as long as n is smaller than Nit.
If Nit is None, it returns True as long as the functional is evolving
fast.
Arguments
---------
n: int
Current step.
Returns
-------
bool
Should the iterations go on ?
"""
# Iterations should be continued as long as n is smaller than
# Nit.
if self.Nit is not None:
return n < self.Nit
# The result depends on n and the criterion.
else:
if n < 2:
return True
if n >= self.Nit_max:
return False
else:
critera = self.StopCritera(n)
# Iterations should be stopped as we got close enough to 0.
if critera is None:
if self.verbose:
print(
'Iterations stopped as the functional is allclose'
' to 0.')
return False
else:
return critera > self.lim
def execute(self):
"""Method that executes the FISTA algorithm.
Returns
-------
numpy array
The optimum of the optimization problem.
dict
Extra informations about convergence.
Note
----
Infos in output dictionary:
* :code:`E`: Evolution of the functional along the iterations.
* :code:`time`: Execution time.
"""
_logger.info('Starting FISTA optimization.')
start = time.time()
#
# Initialization
#
X0 = self.init
n = 0
theta = 1
Xm1 = X0
Xy = X0
#
# Iterations
#
while self.StopTest(n):
# Display info
if self.verbose:
if n >= 2:
critera = self.StopCritera(n)
print(
'n: {}, f + g: {:.3e}, critera: {:.5f} '
'(goal: {:.1e})'.format(
n,
self.E[n-1],
0 if critera is None else critera,
self.lim)
)
else:
print('n: {}'.format(n))
# 1st step - Gradient descent
X = Xy - self.tau * self.df(Xy)
# 2nd step - Thresholding
X = self.pg(X)
# Update
thetap1 = 0.5 * (1 + math.sqrt(1 + 4 * theta**2))
Xy = X + ((theta - 1) / thetap1) * (X - Xm1)
#
theta = thetap1
Xm1 = X
# Compute cost function and increment
self.E[n] = self.f(X) + self.g(X)
n = n + 1
# import ipdb; ipdb.set_trace()
self.E = self.E[:n]
# Output info
InfoOut = {'E': self.E, 'time': time.time() - start}
_logger.info('FISTA optimization finished.')
return X, InfoOut
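if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): solve a small LASSO
    # problem, min_x 0.5*||A x - b||^2 + lam*||x||_1, with the FISTA class above.
    # The data are synthetic, and the soft-threshold level assumes that pg must
    # already include the internal step tau = 0.99 / L used by execute().
    np.random.seed(0)
    A = np.random.randn(20, 40)
    b = np.random.randn(20)
    lam = 0.1
    L = np.linalg.norm(A, 2) ** 2   # Lipschitz constant of the gradient of f
    tau = 0.99 / L                  # step used internally by FISTA
    f = lambda x: 0.5 * np.sum((A.dot(x) - b) ** 2)
    df = lambda x: A.T.dot(A.dot(x) - b)
    g = lambda x: lam * np.sum(np.abs(x))
    pg = lambda x: np.sign(x) * np.maximum(np.abs(x) - lam * tau, 0.0)
    solver = FISTA(f, df, L, g, pg, shape=(40,), Nit=200, verbose=False)
    x_opt, infos = solver.execute()
    print("final objective: {:.4f}".format(infos["E"][-1]))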
| StarcoderdataPython |
4816574 | # ------------------------------------------------------
#
# Simple Python to parse an XML Stream from the
# TDI Sensors Output Data Standard Format V2.0 Feb 2020
# TD-1600 Report Data Packet Label Structure
#
# -----------------------------------------------------
import xml.etree.ElementTree as ET
# send an email with the data
# Import smtplib for the actual sending function
#
import smtplib
# Import the email modules we'll need
#
from email.message import EmailMessage
# read the xml stream xml_data_stream
# root = ET.fromstring(xml_data_stream)
# open the XML File output from the camera
#
tree = ET.parse('c:\\pg\shopDoor1.xml')
# XML
root = tree.getroot()
print(root.tag,root.attrib)
# if you want the whole thing
#
for child in root:
print(" %s : %s \n" % (child.tag, child.attrib))
for cnt in root.findall('Count'):
# add these keys to the XML output
#
newKey_shoppers = ET.SubElement(cnt, 'total_shoppers')
newKey_insideBuilding = ET.SubElement(cnt, 'ppl_inside_building')
# parse the xml for the number of people coming in and out of the entrance
#
startTime = cnt.find('StartTime').text
endTime = cnt.find('EndTime').text
enters = cnt.find('Enters').text
exits = cnt.find('Exits').text
status = cnt.find('Status').text
staff = cnt.find('Staffs').text
VIPs = cnt.find('VIPs').text
Male0_14 = cnt.find('Male0_14').text
Female0_14 = cnt.find('Female0_14').text
Male15_29 = cnt.find('Male15_29').text
Female15_29 = cnt.find('Female15_29').text
Male30_59 = cnt.find('Male30_59').text
Female30_59 = cnt.find('Female30_59').text
Male60_ = cnt.find('Male60_').text
Female60_ = cnt.find('Female60_').text
female = int(Female0_14) + int(Female15_29) + int(Female30_59) + int(Female60_)
male = int(Male0_14) + int(Male15_29) + int(Male30_59) + int(Male60_)
young = int(Male0_14) + int(Female0_14)
teen = int(Male15_29) + int(Female15_29)
mid = int(Male30_59) + int(Female30_59)
old = int(Male60_) + int(Female60_)
inside = int(enters) - int(exits)
shoppers = inside - int(staff)
print(startTime)
print(endTime)
print(" total inside = %u shoppers inside = %u\n" % (inside,shoppers))
print(" male = %u female = %u vip = %u" % (male,female,vip))
print(" young = %u teen = %u mid = %u old = %u" % (young,teen,mid,old))
# adds a section to the XML (writes -1 if the data can't be trusted)
#
if ( int(status) == 0 ):
newKey_shoppers.text = str(shoppers)
newKey_insideBuilding.text = str(inside)
else:
newKey_shoppers.text = "-1"
newKey_insideBuilding.text = "-1"
# Create a text/plain message
#
msg = EmailMessage()
emailData = " total inside = " + str(inside) + " total shoppers inside = " + str(shoppers) + "\n start time : " + startTime + "\n end time : \b" + endTime
msg.set_content(emailData)
msg['Subject'] = f'The contents of camera data from {startTime}'
msg['From'] = "<EMAIL>"
msg['To'] = "<EMAIL>"
# Send the message via our own SMTP server.
#
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
tree = ET.ElementTree(root)
# print VIP key
#
for taggedPerson in root.iter('VIP'):
print(taggedPerson.attrib)
# print Service key
#
for cnt in root.findall('Service'):
status = cnt.find('Status').text
if ( int(status) == 0 ):
startTime = cnt.find('StartTime').text
endTime = cnt.find('EndTime').text
nos = cnt.find('NumberServed').text
so = cnt.find('SecondsOccupied').text
tso = cnt.find('TotalSecondsOccupied').text
print(startTime)
print(endTime)
print(" number served = %u occupied (seconds) = %u total (seconds) = %u\n" % (nos,so,tso))
for cnt in root.findall('DwellTimes'):
for DwellTime in cnt.iter('ServiceTime'):
print(DwellTime.attrib)
tree.write('C:\pg\shopDoor1-1.xml', encoding="UTF-8")
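# --- Hedged reference input (not part of the original script) ---
# A minimal XML layout consistent with the elements this script reads; it is an
# illustrative sketch, not the official TD-1600 V2.0 packet format, and the root
# element name is an assumption.
#
# <Report>
#   <Count>
#     <StartTime>2020-02-01T09:00:00</StartTime>
#     <EndTime>2020-02-01T10:00:00</EndTime>
#     <Enters>42</Enters> <Exits>30</Exits> <Status>0</Status>
#     <Staffs>2</Staffs> <VIPs>1</VIPs>
#     <Male0_14>3</Male0_14> <Female0_14>2</Female0_14>
#     <Male15_29>8</Male15_29> <Female15_29>7</Female15_29>
#     <Male30_59>12</Male30_59> <Female30_59>9</Female30_59>
#     <Male60_>0</Male60_> <Female60_>1</Female60_>
#   </Count>
#   <Service>
#     <Status>0</Status>
#     <StartTime>2020-02-01T09:00:00</StartTime> <EndTime>2020-02-01T10:00:00</EndTime>
#     <NumberServed>5</NumberServed>
#     <SecondsOccupied>600</SecondsOccupied>
#     <TotalSecondsOccupied>1800</TotalSecondsOccupied>
#   </Service>
# </Report>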
| StarcoderdataPython |
4825433 | # -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
import sys
sys.path.append("/home/scotfang/gits/CTranslate2/python/build/lib.linux-x86_64-3.7")
import ctranslate2
from ctranslate2.specs.model_spec import OPTIONAL, index_spec
from ctranslate2.specs import transformer_spec
from ctranslate2.converters import opennmt_tf
_TEST_DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "tests", "data")
def _get_model_path():
return os.path.join(_TEST_DATA_DIR, "models", "v2", "aren-transliteration")
def _get_transliterator():
return ctranslate2.Translator(_get_model_path())
def test_invalid_model_path():
with pytest.raises(RuntimeError):
ctranslate2.Translator("xxx")
def test_contains_model(tmpdir):
assert ctranslate2.contains_model(_get_model_path())
model_dir = tmpdir.join("model")
model_dir.ensure(dir=1)
assert not ctranslate2.contains_model(str(model_dir))
model_dir.join("model.bin").ensure(file=1)
assert ctranslate2.contains_model(str(model_dir))
def test_translator_properties():
translator = ctranslate2.Translator(_get_model_path(), inter_threads=2)
assert translator.model_is_loaded
assert translator.device == "cpu"
assert translator.device_index == 0
assert translator.num_translators == 2
assert translator.num_queued_batches == 0
def test_compute_type():
model_path = _get_model_path()
with pytest.raises(ValueError):
ctranslate2.Translator(model_path, compute_type="float64")
with pytest.raises(TypeError):
ctranslate2.Translator(model_path, compute_type=["int8", "int16"])
ctranslate2.Translator(model_path, compute_type="int8")
ctranslate2.Translator(model_path, compute_type={"cuda": "float16", "cpu": "int8"})
@pytest.mark.parametrize("max_batch_size", [0, 1])
def test_batch_translation(max_batch_size):
translator = _get_transliterator()
output = translator.translate_batch([
["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"],
["آ" ,"ت" ,"ش" ,"ي" ,"س" ,"و" ,"ن"]],
max_batch_size=max_batch_size)
assert len(output) == 2
assert len(output[0]) == 1 # One hypothesis.
assert len(output[1]) == 1
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
assert output[0][0]["score"] < 0
assert "attention" not in output[0][0]
assert output[1][0]["tokens"] == ["a", "c", "h", "i", "s", "o", "n"]
def test_file_translation(tmpdir):
input_path = str(tmpdir.join("input.txt"))
output_path = str(tmpdir.join("output.txt"))
with open(input_path, "w") as input_file:
input_file.write("آ ت ز م و ن")
input_file.write("\n")
input_file.write("آ ت ش ي س و ن")
input_file.write("\n")
translator = _get_transliterator()
stats = translator.translate_file(input_path, output_path, max_batch_size=32)
with open(output_path) as output_file:
lines = output_file.readlines()
assert lines[0].strip() == "a t z m o n"
assert lines[1].strip() == "a c h i s o n"
assert stats[0] == 13 # Number of generated target tokens.
assert stats[1] == 2 # Number of translated examples.
assert isinstance(stats[2], float) # Total time in milliseconds.
def test_raw_file_translation(tmpdir):
input_path = str(tmpdir.join("input.txt"))
output_path = str(tmpdir.join("output.txt"))
with open(input_path, "w") as input_file:
input_file.write("آتزمون")
input_file.write("\n")
input_file.write("آتشيسون")
input_file.write("\n")
translator = ctranslate2.Translator(_get_model_path())
tokenize_fn = lambda text: list(text)
detokenize_fn = lambda tokens: "".join(tokens)
max_batch_size = 4
with pytest.raises(ValueError):
translator.translate_file(
input_path, output_path, max_batch_size, tokenize_fn=tokenize_fn)
with pytest.raises(ValueError):
translator.translate_file(
input_path, output_path, max_batch_size, detokenize_fn=detokenize_fn)
translator.translate_file(
input_path,
output_path,
max_batch_size,
tokenize_fn=tokenize_fn,
detokenize_fn=detokenize_fn)
with open(output_path) as output_file:
lines = output_file.readlines()
assert lines[0].strip() == "atzmon"
assert lines[1].strip() == "achison"
def test_file_translation_with_prefix(tmpdir):
source_path = str(tmpdir.join("input.txt"))
target_path = str(tmpdir.join("target.txt"))
output_path = str(tmpdir.join("output.txt"))
with open(source_path, "w") as source_file:
source_file.write("آ ت ز م و ن")
source_file.write("\n")
source_file.write("آ ت ش ي س و ن")
source_file.write("\n")
with open(target_path, "w") as target_file:
target_file.write("a t s\n")
translator = _get_transliterator()
max_batch_size = 4
with pytest.raises(RuntimeError):
# One line is missing from target_path.
translator.translate_file(
source_path,
output_path,
max_batch_size,
target_path=target_path)
with open(target_path, "a") as target_file:
target_file.write("\n") # No prefix.
translator.translate_file(
source_path,
output_path,
max_batch_size,
target_path=target_path)
with open(output_path) as output_file:
lines = output_file.readlines()
assert lines[0].strip() == "a t s u m o n"
assert lines[1].strip() == "a c h i s o n"
def test_raw_file_translation_with_prefix(tmpdir):
source_path = str(tmpdir.join("input.txt"))
target_path = str(tmpdir.join("target.txt"))
output_path = str(tmpdir.join("output.txt"))
with open(source_path, "w") as source_file:
source_file.write("آتزمون")
source_file.write("\n")
source_file.write("آتشيسون")
source_file.write("\n")
with open(target_path, "w") as target_file:
# Write target in reverse to use a different tokenization.
target_file.write("sta\n")
target_file.write("\n")
translator = ctranslate2.Translator(_get_model_path())
source_tokenize_fn = lambda text: list(text)
target_tokenize_fn = lambda text: list(reversed(list(text)))
detokenize_fn = lambda tokens: "".join(tokens)
max_batch_size = 4
with pytest.raises(ValueError):
# Target tokenization is missing.
translator.translate_file(
source_path,
output_path,
max_batch_size,
tokenize_fn=source_tokenize_fn,
detokenize_fn=detokenize_fn,
target_path=target_path)
translator.translate_file(
source_path,
output_path,
max_batch_size,
tokenize_fn=source_tokenize_fn,
detokenize_fn=detokenize_fn,
target_path=target_path,
target_tokenize_fn=target_tokenize_fn)
with open(output_path) as output_file:
lines = output_file.readlines()
assert lines[0].strip() == "atsumon"
assert lines[1].strip() == "achison"
def test_empty_translation():
translator = _get_transliterator()
assert translator.translate_batch([]) == []
def test_invalid_translation_options():
translator = _get_transliterator()
with pytest.raises(ValueError):
translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]],
min_decoding_length=10,
max_decoding_length=5)
def test_hard_target_prefix():
translator = _get_transliterator()
output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"], ["آ" ,"ت" ,"ش" ,"ي" ,"س" ,"و" ,"ن"]],
target_prefix=[["a", "t", "s"], None])
assert output[0][0]["tokens"][:3] == ["a", "t", "s"]
assert output[1][0]["tokens"] == ["a", "c", "h", "i", "s", "o", "n"]
def test_strongly_biased_target_prefix():
translator = _get_transliterator()
output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"], ["آ" ,"ت" ,"ش" ,"ي" ,"س" ,"و" ,"ن"]],
target_prefix=[["a", "t", "s"], None],
prefix_bias_beta=0.9999999)
assert output[0][0]["tokens"][:3] == ["a", "t", "s"]
assert output[1][0]["tokens"] == ["a", "c", "h", "i", "s", "o", "n"]
def test_weakly_biased_target_prefix():
translator = _get_transliterator()
unconstrained_output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"], ["آ" ,"ت" ,"ش" ,"ي" ,"س" ,"و" ,"ن"]])
weakly_biased_output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"], ["آ" ,"ت" ,"ش" ,"ي" ,"س" ,"و" ,"ن"]],
target_prefix=[["a", "t", "s"], ["s", "i", "o"]],
prefix_bias_beta=0.0000001)
assert unconstrained_output[0][0]["tokens"] == weakly_biased_output[0][0]["tokens"]
assert abs(unconstrained_output[0][0]["score"] - weakly_biased_output[0][0]["score"]) < 0.00001
assert unconstrained_output[1][0]["tokens"] == weakly_biased_output[1][0]["tokens"]
assert abs(unconstrained_output[1][0]["score"] - weakly_biased_output[1][0]["score"]) < 0.00001
def test_didi_cuda_nihao_no_prefix():
translator = ctranslate2.Translator(
"/home/yiqihuang/projects/didi-translate/data/ctranslate2_models/zhen-v4/ctranslate2",
device='cuda')
output = translator.translate_batch(
[["你好"]], beam_size=4, length_penalty=0, target_prefix=[None], prefix_bias_beta=0.99)
assert output[0][0]['tokens'] == ['Hello']
def test_didi_cuda_nihao_strongly_biased_prefix():
translator = ctranslate2.Translator(
"/home/yiqihuang/projects/didi-translate/data/ctranslate2_models/zhen-v4/ctranslate2",
device='cuda')
output = translator.translate_batch(
[["你好"]], beam_size=4, length_penalty=0, target_prefix=[["Bye"]], prefix_bias_beta=0.99)
assert output[0][0]['tokens'] == ['Bye']
def test_didi_cuda_nihao_weakly_biased_prefix():
translator = ctranslate2.Translator(
"/home/yiqihuang/projects/didi-translate/data/ctranslate2_models/zhen-v4/ctranslate2",
device='cuda')
output = translator.translate_batch(
[["你好"]], beam_size=4, length_penalty=0, target_prefix=[["Bye"]], prefix_bias_beta=0.01)
assert output[0][0]['tokens'] == ['Hello']
def test_didi_weakly_biased_decoding_los_angeles():
translator = ctranslate2.Translator(
"/home/yiqihuang/projects/didi-translate/data/ctranslate2_models/zhen-v4/ctranslate2",
device='cuda')
output = translator.translate_batch(
[["洛杉矶"]], beam_size=2, length_penalty=0, target_prefix=[["San", "Francisco"]], prefix_bias_beta=0.01)
assert output[0][0]['tokens'] == ['Los', 'Angeles']
def test_didi_strongly_biased_decoding_los_angeles():
translator = ctranslate2.Translator(
"/home/yiqihuang/projects/didi-translate/data/ctranslate2_models/zhen-v4/ctranslate2",
device='cuda')
output = translator.translate_batch(
[["洛杉矶"]], beam_size=2, length_penalty=0, target_prefix=[["San", "Francisco"]], prefix_bias_beta=0.99)
assert output[0][0]['tokens'] == ['San', 'Francisco']
def test_num_hypotheses():
translator = _get_transliterator()
output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]], beam_size=4, num_hypotheses=2)
assert len(output[0]) == 2
def test_max_decoding_length():
translator = _get_transliterator()
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]], max_decoding_length=2)
assert output[0][0]["tokens"] == ["a", "t"]
def test_min_decoding_length():
translator = _get_transliterator()
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]], min_decoding_length=7)
assert len(output[0][0]["tokens"]) > 6 # 6 is the expected target length.
def test_return_attention():
translator = _get_transliterator()
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]], return_attention=True)
attention = output[0][0]["attention"]
assert len(attention) == 6 # Target length.
assert len(attention[0]) == 6 # Source length.
def test_ignore_scores():
translator = _get_transliterator()
output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]],
beam_size=1,
return_scores=False)
assert "scores" not in output[0][0]
def test_return_alternatives():
translator = _get_transliterator()
output = translator.translate_batch(
[["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]],
target_prefix=[["a", "t"]],
num_hypotheses=10,
return_alternatives=True)
assert len(output[0]) == 10
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
assert output[0][1]["tokens"] == ["a", "t", "s", "u", "m", "o", "n"]
@pytest.mark.parametrize("to_cpu", [False, True])
def test_model_unload(to_cpu):
batch = [["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]]
translator = _get_transliterator()
translator.unload_model(to_cpu=to_cpu)
with pytest.raises(RuntimeError, match="unloaded"):
translator.translate_batch(batch)
translator.load_model()
output = translator.translate_batch(batch)
assert len(output) == 1
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
_FRAMEWORK_DATA_EXIST = os.path.isdir(
os.path.join(_TEST_DATA_DIR, "models", "transliteration-aren-all"))
@pytest.mark.skipif(not _FRAMEWORK_DATA_EXIST, reason="Data files are not available")
@pytest.mark.parametrize(
"model_path,src_vocab,tgt_vocab,model_spec",
[("v2/savedmodel", None, None, "TransformerBase"),
("v2/savedmodel", None, None, ctranslate2.specs.TransformerSpec(num_layers=6, num_heads=8)),
("v1/checkpoint", "ar.vocab", "en.vocab", ctranslate2.specs.TransformerBase()),
("v2/checkpoint", "ar.vocab", "en.vocab", ctranslate2.specs.TransformerBase()),
])
def test_opennmt_tf_model_conversion(tmpdir, model_path, src_vocab, tgt_vocab, model_spec):
model_path = os.path.join(
_TEST_DATA_DIR, "models", "transliteration-aren-all", "opennmt_tf", model_path)
if src_vocab is not None:
src_vocab = os.path.join(model_path, src_vocab)
if tgt_vocab is not None:
tgt_vocab = os.path.join(model_path, tgt_vocab)
converter = ctranslate2.converters.OpenNMTTFConverter(
model_path, src_vocab=src_vocab, tgt_vocab=tgt_vocab)
output_dir = str(tmpdir.join("ctranslate2_model"))
converter.convert(output_dir, model_spec)
translator = ctranslate2.Translator(output_dir)
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]])
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
@pytest.mark.skipif(not _FRAMEWORK_DATA_EXIST, reason="Data files are not available")
@pytest.mark.parametrize("quantization", ["float16", "int16", "int8"])
def test_opennmt_tf_model_quantization(tmpdir, quantization):
model_path = os.path.join(
_TEST_DATA_DIR, "models", "transliteration-aren-all", "opennmt_tf", "v2", "checkpoint")
converter = ctranslate2.converters.OpenNMTTFConverter(
model_path,
src_vocab=os.path.join(model_path, "ar.vocab"),
tgt_vocab=os.path.join(model_path, "en.vocab"))
output_dir = str(tmpdir.join("ctranslate2_model"))
converter.convert(output_dir, ctranslate2.specs.TransformerBase(), quantization=quantization)
translator = ctranslate2.Translator(output_dir)
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]])
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
@pytest.mark.skipif(not _FRAMEWORK_DATA_EXIST, reason="Data files are not available")
def test_opennmt_tf_variables_conversion(tmpdir):
model_path = os.path.join(
_TEST_DATA_DIR, "models", "transliteration-aren-all", "opennmt_tf", "v2", "checkpoint")
_, variables, src_vocab, tgt_vocab = opennmt_tf.load_model(
model_path,
src_vocab=os.path.join(model_path, "ar.vocab"),
tgt_vocab=os.path.join(model_path, "en.vocab"))
converter = ctranslate2.converters.OpenNMTTFConverter(
src_vocab=src_vocab, tgt_vocab=tgt_vocab, variables=variables)
output_dir = str(tmpdir.join("ctranslate2_model"))
converter.convert(output_dir, ctranslate2.specs.TransformerBase())
translator = ctranslate2.Translator(output_dir)
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]])
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
@pytest.mark.skipif(not _FRAMEWORK_DATA_EXIST, reason="Data files are not available")
def test_opennmt_tf_model_conversion_invalid_vocab(tmpdir):
model_path = os.path.join(
_TEST_DATA_DIR, "models", "transliteration-aren-all", "opennmt_tf", "v2", "checkpoint")
# Swap source and target vocabularies.
converter = ctranslate2.converters.OpenNMTTFConverter(
model_path,
src_vocab=os.path.join(model_path, "en.vocab"),
tgt_vocab=os.path.join(model_path, "ar.vocab"))
output_dir = str(tmpdir.join("ctranslate2_model"))
with pytest.raises(ValueError):
converter.convert(output_dir, ctranslate2.specs.TransformerBase())
def test_opennmt_tf_shared_embeddings_conversion(tmpdir):
# Issue https://github.com/OpenNMT/CTranslate2/issues/118
import tensorflow as tf
import opennmt
vocab = opennmt.data.Vocab()
for i in range(10):
vocab.add(str(i))
vocab_path = str(tmpdir.join("vocab.txt"))
vocab.serialize(vocab_path)
num_layers = 3
num_heads = 4
model = opennmt.models.Transformer(
opennmt.inputters.WordEmbedder(32),
opennmt.inputters.WordEmbedder(32),
num_layers,
num_units=32,
num_heads=num_heads,
ffn_inner_dim=64,
share_embeddings=opennmt.models.EmbeddingsSharingLevel.ALL)
model.initialize({"source_vocabulary": vocab_path, "target_vocabulary": vocab_path})
model.create_variables()
checkpoint_prefix = str(tmpdir.join("ckpt"))
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.write(checkpoint_prefix)
converter = ctranslate2.converters.OpenNMTTFConverter(
model_path=checkpoint_prefix, src_vocab=vocab_path, tgt_vocab=vocab_path)
output_dir = str(tmpdir.join("ctranslate2_model"))
converter.convert(output_dir, ctranslate2.specs.TransformerSpec(num_layers, num_heads))
# Check that the translation runs.
translator = ctranslate2.Translator(output_dir)
translator.translate_batch([["1", "2", "3"]], max_decoding_length=10)
@pytest.mark.skipif(not _FRAMEWORK_DATA_EXIST, reason="Data files are not available")
def test_opennmt_py_model_conversion(tmpdir):
model_path = os.path.join(
_TEST_DATA_DIR, "models", "transliteration-aren-all", "opennmt_py", "aren_7000.pt")
converter = ctranslate2.converters.OpenNMTPyConverter(model_path)
output_dir = str(tmpdir.join("ctranslate2_model"))
converter.convert(output_dir, ctranslate2.specs.TransformerBase())
translator = ctranslate2.Translator(output_dir)
output = translator.translate_batch([["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"]])
assert output[0][0]["tokens"] == ["a", "t", "z", "m", "o", "n"]
@pytest.mark.skipif(not _FRAMEWORK_DATA_EXIST, reason="Data files are not available")
def test_opennmt_py_relative_transformer(tmpdir):
model_path = os.path.join(
_TEST_DATA_DIR, "models", "transliteration-aren-all",
"opennmt_py", "aren_relative_6000.pt")
converter = ctranslate2.converters.OpenNMTPyConverter(model_path)
output_dir = str(tmpdir.join("ctranslate2_model"))
converter.convert(output_dir, ctranslate2.specs.TransformerBaseRelative())
translator = ctranslate2.Translator(output_dir)
output = translator.translate_batch([
["آ" ,"ت" ,"ز" ,"م" ,"و" ,"ن"],
["آ" ,"ر" ,"ث" ,"ر"]])
assert output[0][0]["tokens"] == ["a", "t", "z", "o", "m", "o", "n"]
assert output[1][0]["tokens"] == ["a", "r", "t", "h", "e", "r"]
def test_layer_spec_validate():
class SubSpec(ctranslate2.specs.LayerSpec):
def __init__(self):
self.a = np.ones([5], dtype=np.float16)
class Spec(ctranslate2.specs.LayerSpec):
def __init__(self):
self.a = np.zeros([5], dtype=np.float32)
self.b = np.zeros([5], dtype=np.float16)
self.c = np.zeros([5], dtype=np.int32)
self.d = OPTIONAL
self.e = SubSpec()
self.f = True
spec = Spec()
spec.validate()
assert spec.a.dtype == np.float32
assert spec.b.dtype == np.float32
assert spec.c.dtype == np.int32
assert spec.d == OPTIONAL
assert spec.e.a.dtype == np.float32
assert spec.f.dtype == np.int8
def test_layer_spec_optimize():
class SubSpec(ctranslate2.specs.LayerSpec):
def __init__(self):
self.a = np.ones([6], dtype=np.float32)
class Spec(ctranslate2.specs.LayerSpec):
def __init__(self):
self.a = np.ones([5], dtype=np.float32)
self.b = np.ones([5], dtype=np.float32)
self.c = np.zeros([5], dtype=np.int32)
self.d = np.dtype("float32").type(3.14)
self.weight = np.ones([5, 4], dtype=np.float32)
self.sub = SubSpec()
spec = Spec()
spec.optimize(quantization="int16")
assert spec.a.dtype == np.float32
assert spec.b == "a"
assert spec.c.dtype == np.int32
assert spec.d.dtype == np.float32
assert spec.weight.dtype == np.int16
assert spec.weight_scale.dtype == np.float32
spec = Spec()
spec.optimize(quantization="float16")
assert spec.a.dtype == np.float16
assert spec.b == "a"
assert spec.c.dtype == np.int32
assert spec.d.dtype == np.float32
assert spec.weight.dtype == np.float16
assert spec.sub.a.dtype == np.float16
def test_index_spec():
spec = ctranslate2.specs.TransformerBase()
assert isinstance(
index_spec(spec, "encoder/layer_5"),
transformer_spec.TransformerEncoderLayerSpec)
assert isinstance(
index_spec(spec, "encoder/layer_5/ffn"),
transformer_spec.FeedForwardSpec)
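# --- Hedged usage sketch (not part of the original test file) ---
# The Translator workflow exercised by the tests above, distilled; the model
# directory is an assumption, and the input must already be tokenized.
#
# translator = ctranslate2.Translator("path/to/ctranslate2_model", device="cpu")
# results = translator.translate_batch(
#     [["آ", "ت", "ز", "م", "و", "ن"]],
#     beam_size=4,
#     num_hypotheses=2,
#     return_attention=True,
# )
# best = results[0][0]                  # first sentence, best hypothesis
# tokens, score = best["tokens"], best["score"]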
| StarcoderdataPython |
1680651 | <gh_stars>1-10
import os.path as osp
import numpy as np
import pickle
import random
from pathlib import Path
from functools import reduce
from typing import Tuple, List
from tqdm import tqdm
from pyquaternion import Quaternion
from nuscenes import NuScenes
from nuscenes.utils import splits
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.geometry_utils import transform_matrix
from nuscenes.utils.data_classes import Box
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
general_to_detection = {
"human.pedestrian.adult": "pedestrian",
"human.pedestrian.child": "pedestrian",
"human.pedestrian.wheelchair": "ignore",
"human.pedestrian.stroller": "ignore",
"human.pedestrian.personal_mobility": "ignore",
"human.pedestrian.police_officer": "pedestrian",
"human.pedestrian.construction_worker": "pedestrian",
"animal": "ignore",
"vehicle.car": "car",
"vehicle.motorcycle": "motorcycle",
"vehicle.bicycle": "bicycle",
"vehicle.bus.bendy": "bus",
"vehicle.bus.rigid": "bus",
"vehicle.truck": "truck",
"vehicle.construction": "construction_vehicle",
"vehicle.emergency.ambulance": "ignore",
"vehicle.emergency.police": "ignore",
"vehicle.trailer": "trailer",
"movable_object.barrier": "barrier",
"movable_object.trafficcone": "traffic_cone",
"movable_object.pushable_pullable": "ignore",
"movable_object.debris": "ignore",
"static_object.bicycle_rack": "ignore",
}
cls_attr_dist = {
"barrier": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"bicycle": {
"cycle.with_rider": 2791,
"cycle.without_rider": 8946,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"bus": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 9092,
"vehicle.parked": 3294,
"vehicle.stopped": 3881,
},
"car": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 114304,
"vehicle.parked": 330133,
"vehicle.stopped": 46898,
},
"construction_vehicle": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 882,
"vehicle.parked": 11549,
"vehicle.stopped": 2102,
},
"ignore": {
"cycle.with_rider": 307,
"cycle.without_rider": 73,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 165,
"vehicle.parked": 400,
"vehicle.stopped": 102,
},
"motorcycle": {
"cycle.with_rider": 4233,
"cycle.without_rider": 8326,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"pedestrian": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 157444,
"pedestrian.sitting_lying_down": 13939,
"pedestrian.standing": 46530,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"traffic_cone": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"trailer": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 3421,
"vehicle.parked": 19224,
"vehicle.stopped": 1895,
},
"truck": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 21339,
"vehicle.parked": 55626,
"vehicle.stopped": 11097,
},
}
def box_velocity(
nusc, sample_annotation_token: str, max_time_diff: float = 1.5
) -> np.ndarray:
"""
Estimate the velocity for an annotation.
If possible, we compute the centered difference between the previous and next frame.
Otherwise we use the difference between the current and previous/next frame.
If the velocity cannot be estimated, values are set to np.nan.
:param sample_annotation_token: Unique sample_annotation identifier.
:param max_time_diff: Max allowed time diff between consecutive samples that are used to estimate velocities.
:return: <np.float: 3>. Velocity in x/y/z direction in m/s.
"""
current = nusc.get("sample_annotation", sample_annotation_token)
has_prev = current["prev"] != ""
has_next = current["next"] != ""
# Cannot estimate velocity for a single annotation.
if not has_prev and not has_next:
return np.array([np.nan, np.nan, np.nan])
if has_prev:
first = nusc.get("sample_annotation", current["prev"])
else:
first = current
if has_next:
last = nusc.get("sample_annotation", current["next"])
else:
last = current
pos_last = np.array(last["translation"])
pos_first = np.array(first["translation"])
pos_diff = pos_last - pos_first
time_last = 1e-6 * nusc.get("sample", last["sample_token"])["timestamp"]
time_first = 1e-6 * nusc.get("sample", first["sample_token"])["timestamp"]
time_diff = time_last - time_first
if has_next and has_prev:
# If doing centered difference, allow for up to double the max_time_diff.
max_time_diff *= 2
if time_diff > max_time_diff:
# If time_diff is too big, don't return an estimate.
return np.array([np.nan, np.nan, np.nan])
else:
return pos_diff / time_diff
def remove_close(points, radius: float) -> np.ndarray:
"""
Removes points that lie within the given radius of the origin (in x and y) and returns the filtered array.
:param radius: Radius below which points are removed.
"""
x_filt = np.abs(points[0, :]) < radius
y_filt = np.abs(points[1, :]) < radius
not_close = np.logical_not(np.logical_and(x_filt, y_filt))
points = points[:, not_close]
return points
def _second_det_to_nusc_box(detection):
box3d = detection["box3d_lidar"].detach().cpu().numpy()
scores = detection["scores"].detach().cpu().numpy()
labels = detection["label_preds"].detach().cpu().numpy()
box3d[:, -1] = -box3d[:, -1] - np.pi / 2
box_list = []
for i in range(box3d.shape[0]):
quat = Quaternion(axis=[0, 0, 1], radians=box3d[i, -1])
velocity = (*box3d[i, 6:8], 0.0)
box = Box(
box3d[i, :3],
box3d[i, 3:6],
quat,
label=labels[i],
score=scores[i],
velocity=velocity,
)
box_list.append(box)
return box_list
def _lidar_nusc_box_to_global(nusc, boxes, sample_token):
try:
s_record = nusc.get("sample", sample_token)
sample_data_token = s_record["data"]["LIDAR_TOP"]
except:
sample_data_token = sample_token
sd_record = nusc.get("sample_data", sample_data_token)
cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
sensor_record = nusc.get("sensor", cs_record["sensor_token"])
pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"])
data_path = nusc.get_sample_data_path(sample_data_token)
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.rotate(Quaternion(cs_record["rotation"]))
box.translate(np.array(cs_record["translation"]))
# Move box to global coord system
box.rotate(Quaternion(pose_record["rotation"]))
box.translate(np.array(pose_record["translation"]))
box_list.append(box)
return box_list
def _get_available_scenes(nusc):
available_scenes = []
print("total scene num:", len(nusc.scene))
for scene in nusc.scene:
scene_token = scene["token"]
scene_rec = nusc.get("scene", scene_token)
sample_rec = nusc.get("sample", scene_rec["first_sample_token"])
sd_rec = nusc.get("sample_data", sample_rec["data"]["LIDAR_TOP"])
has_more_frames = True
scene_not_exist = False
while has_more_frames:
lidar_path, boxes, _ = nusc.get_sample_data(sd_rec["token"])
if not Path(lidar_path).exists():
scene_not_exist = True
break
else:
break
if not sd_rec["next"] == "":
sd_rec = nusc.get("sample_data", sd_rec["next"])
else:
has_more_frames = False
if scene_not_exist:
continue
available_scenes.append(scene)
print("exist scene num:", len(available_scenes))
return available_scenes
def get_sample_data(
nusc, sample_data_token: str, selected_anntokens: List[str] = None
) -> Tuple[str, List[Box], np.array]:
"""
Returns the data path as well as all annotations related to that sample_data.
Note that the boxes are transformed into the current sensor's coordinate frame.
:param sample_data_token: Sample_data token.
:param selected_anntokens: If provided only return the selected annotation.
:return: (data_path, boxes, camera_intrinsic <np.array: 3, 3>)
"""
# Retrieve sensor & pose records
sd_record = nusc.get("sample_data", sample_data_token)
cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
sensor_record = nusc.get("sensor", cs_record["sensor_token"])
pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"])
data_path = nusc.get_sample_data_path(sample_data_token)
if sensor_record["modality"] == "camera":
cam_intrinsic = np.array(cs_record["camera_intrinsic"])
imsize = (sd_record["width"], sd_record["height"])
else:
cam_intrinsic = None
imsize = None
# Retrieve all sample annotations and map to sensor coordinate system.
if selected_anntokens is not None:
boxes = list(map(nusc.get_box, selected_anntokens))
else:
boxes = nusc.get_boxes(sample_data_token)
# Make list of Box objects including coord system transforms.
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.translate(-np.array(pose_record["translation"]))
box.rotate(Quaternion(pose_record["rotation"]).inverse)
# Move box to sensor coord system
box.translate(-np.array(cs_record["translation"]))
box.rotate(Quaternion(cs_record["rotation"]).inverse)
box_list.append(box)
return data_path, box_list, cam_intrinsic
def get_sample_ground_plane(root_path, version):
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
rets = {}
for sample in tqdm(nusc.sample):
chan = "LIDAR_TOP"
sd_token = sample["data"][chan]
sd_rec = nusc.get("sample_data", sd_token)
lidar_path, _, _ = get_sample_data(nusc, sd_token)
points = read_file(lidar_path)
points = np.concatenate((points[:, :3], np.ones((points.shape[0], 1))), axis=1)
plane, inliers, outliers = fit_plane_LSE_RANSAC(
points, return_outlier_list=True
)
xx = points[:, 0]
yy = points[:, 1]
zz = (-plane[0] * xx - plane[1] * yy - plane[3]) / plane[2]
rets.update({sd_token: {"plane": plane, "height": zz,}})
with open(nusc.root_path / "infos_trainval_ground_plane.pkl", "wb") as f:
pickle.dump(rets, f)
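# --- Hedged sketch (not part of the original file) ---
# read_file and fit_plane_LSE_RANSAC are not defined or imported in this snippet.
# The helper below sketches what the plane fitter is assumed to do, based on how
# its result is used above: it returns plane coefficients (a, b, c, d) of
# a*x + b*y + c*z + d = 0 plus inlier/outlier index arrays. It is an illustration
# with a hypothetical name, not the original helper.
def _fit_plane_ransac_sketch(points, iters=100, inlier_thresh=0.2,
                             return_outlier_list=False):
    best_plane = None
    best_inliers = np.array([], dtype=int)
    n = points.shape[0]
    for _ in range(iters):
        # Sample three points and build the plane through them.
        idx = np.random.choice(n, 3, replace=False)
        p0, p1, p2 = points[idx, :3]
        normal = np.cross(p1 - p0, p2 - p0)
        norm = np.linalg.norm(normal)
        if norm < 1e-8:
            continue  # degenerate (nearly collinear) sample
        normal = normal / norm
        d = -normal.dot(p0)
        # Count points within the distance threshold of the candidate plane.
        dist = np.abs(points[:, :3].dot(normal) + d)
        inliers = np.where(dist < inlier_thresh)[0]
        if len(inliers) > len(best_inliers):
            best_inliers = inliers
            best_plane = np.array([normal[0], normal[1], normal[2], d])
    if return_outlier_list:
        outliers = np.setdiff1d(np.arange(n), best_inliers)
        return best_plane, best_inliers, outliers
    return best_plane, best_inliers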
def _fill_trainval_infos(nusc, train_scenes, val_scenes, test=False, nsweeps=10):
from nuscenes.utils.geometry_utils import transform_matrix
train_nusc_infos = []
val_nusc_infos = []
ref_chan = "LIDAR_TOP" # The radar channel from which we track back n sweeps to aggregate the point cloud.
chan = "LIDAR_TOP" # The reference channel of the current sample_rec that the point clouds are mapped to.
for sample in tqdm(nusc.sample):
""" Manual save info["sweeps"] """
# Get reference pose and timestamp
# ref_chan == "LIDAR_TOP"
ref_sd_token = sample["data"][ref_chan]
ref_sd_rec = nusc.get("sample_data", ref_sd_token)
ref_cs_rec = nusc.get(
"calibrated_sensor", ref_sd_rec["calibrated_sensor_token"]
)
ref_pose_rec = nusc.get("ego_pose", ref_sd_rec["ego_pose_token"])
ref_time = 1e-6 * ref_sd_rec["timestamp"]
ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)
ref_cam_front_token = sample["data"]["CAM_FRONT"]
ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)
# Homogeneous transform from ego car frame to reference frame
ref_from_car = transform_matrix(
ref_cs_rec["translation"], Quaternion(ref_cs_rec["rotation"]), inverse=True
)
# Homogeneous transformation matrix from global to _current_ ego car frame
car_from_global = transform_matrix(
ref_pose_rec["translation"],
Quaternion(ref_pose_rec["rotation"]),
inverse=True,
)
info = {
"lidar_path": ref_lidar_path,
"cam_front_path": ref_cam_path,
"cam_intrinsic": ref_cam_intrinsic,
"token": sample["token"],
"sweeps": [],
"ref_from_car": ref_from_car,
"car_from_global": car_from_global,
"timestamp": ref_time,
}
sample_data_token = sample["data"][chan]
curr_sd_rec = nusc.get("sample_data", sample_data_token)
sweeps = []
while len(sweeps) < nsweeps - 1:
if curr_sd_rec["prev"] == "":
if len(sweeps) == 0:
sweep = {
"lidar_path": ref_lidar_path,
"sample_data_token": curr_sd_rec["token"],
"transform_matrix": None,
"time_lag": curr_sd_rec["timestamp"] * 0,
# time_lag: 0,
}
sweeps.append(sweep)
else:
sweeps.append(sweeps[-1])
else:
curr_sd_rec = nusc.get("sample_data", curr_sd_rec["prev"])
# Get past pose
current_pose_rec = nusc.get("ego_pose", curr_sd_rec["ego_pose_token"])
global_from_car = transform_matrix(
current_pose_rec["translation"],
Quaternion(current_pose_rec["rotation"]),
inverse=False,
)
# Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
current_cs_rec = nusc.get(
"calibrated_sensor", curr_sd_rec["calibrated_sensor_token"]
)
car_from_current = transform_matrix(
current_cs_rec["translation"],
Quaternion(current_cs_rec["rotation"]),
inverse=False,
)
tm = reduce(
np.dot,
[ref_from_car, car_from_global, global_from_car, car_from_current],
)
lidar_path = nusc.get_sample_data_path(curr_sd_rec["token"])
time_lag = ref_time - 1e-6 * curr_sd_rec["timestamp"]
sweep = {
"lidar_path": lidar_path,
"sample_data_token": curr_sd_rec["token"],
"transform_matrix": tm,
"global_from_car": global_from_car,
"car_from_current": car_from_current,
"time_lag": time_lag,
}
sweeps.append(sweep)
info["sweeps"] = sweeps
assert (
len(info["sweeps"]) == nsweeps - 1
), f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, you should duplicate to sweep num {nsweeps-1}"
""" read from api """
# sd_record = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
#
# # Get boxes in lidar frame.
# lidar_path, boxes, cam_intrinsic = nusc.get_sample_data(
# sample['data']['LIDAR_TOP'])
#
# # Get aggregated point cloud in lidar frame.
# sample_rec = nusc.get('sample', sd_record['sample_token'])
# chan = sd_record['channel']
# ref_chan = 'LIDAR_TOP'
# pc, times = LidarPointCloud.from_file_multisweep(nusc,
# sample_rec,
# chan,
# ref_chan,
# nsweeps=nsweeps)
# lidar_path = osp.join(nusc.dataroot, "sample_10sweeps/LIDAR_TOP",
# sample['data']['LIDAR_TOP'] + ".bin")
# pc.points.astype('float32').tofile(open(lidar_path, "wb"))
#
# info = {
# "lidar_path": lidar_path,
# "token": sample["token"],
# # "timestamp": times,
# }
if not test:
annotations = [
nusc.get("sample_annotation", token) for token in sample["anns"]
]
locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)
# rots = np.array([b.orientation.yaw_pitch_roll[0] for b in ref_boxes]).reshape(-1, 1)
velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(
-1, 1
)
names = np.array([b.name for b in ref_boxes])
tokens = np.array([b.token for b in ref_boxes])
gt_boxes = np.concatenate(
[locs, dims, velocity[:, :2], -rots - np.pi / 2], axis=1
)
# gt_boxes = np.concatenate([locs, dims, rots], axis=1)
assert len(annotations) == len(gt_boxes) == len(velocity)
info["gt_boxes"] = gt_boxes
info["gt_boxes_velocity"] = velocity
info["gt_names"] = np.array([general_to_detection[name] for name in names])
info["gt_boxes_token"] = tokens
if sample["scene_token"] in train_scenes:
train_nusc_infos.append(info)
else:
val_nusc_infos.append(info)
return train_nusc_infos, val_nusc_infos
def quaternion_yaw(q: Quaternion) -> float:
"""
Calculate the yaw angle from a quaternion.
Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
It does not work for a box in the camera frame.
:param q: Quaternion of interest.
:return: Yaw angle in radians.
"""
# Project into xy plane.
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
# Measure yaw using arctan.
yaw = np.arctan2(v[1], v[0])
return yaw
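# Example (sketch): for a pure rotation about the z-axis, quaternion_yaw returns
# the rotation angle itself (wrapped to (-pi, pi]), e.g.
#   quaternion_yaw(Quaternion(axis=[0, 0, 1], angle=0.0))        # -> 0.0
#   quaternion_yaw(Quaternion(axis=[0, 0, 1], angle=np.pi / 2))  # -> pi / 2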
def create_nuscenes_infos_test(root_path, version="v1.0-trainval", nsweeps=10):
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
assert version in available_vers
if version == "v1.0-trainval":
train_scenes = splits.train
# random.shuffle(train_scenes)
# train_scenes = train_scenes[:int(len(train_scenes)*0.2)]
val_scenes = splits.val
elif version == "v1.0-test":
train_scenes = splits.test
val_scenes = []
elif version == "v1.0-mini":
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise ValueError("unknown")
test = "test" in version
root_path = Path(root_path)
# filter exist scenes. you may only download part of dataset.
available_scenes = _get_available_scenes(nusc)
available_scene_names = [s["name"] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set(
[
available_scenes[available_scene_names.index(s)]["token"]
for s in train_scenes
]
)
val_scenes = set(
[available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes]
)
if test:
print(f"test scene: {len(train_scenes)}")
else:
print(f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")
train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
nusc, train_scenes, val_scenes, test, nsweeps=nsweeps
)
if test:
print(f"test sample: {len(train_nusc_infos)}")
with open(
root_path / "infos_test_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
else:
print(
f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
)
with open(
root_path / "infos_train_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
with open(
root_path / "infos_val_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(val_nusc_infos, f)
def create_nuscenes_infos(root_path, version="v1.0-trainval", nsweeps=10):
nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
assert version in available_vers
if version == "v1.0-trainval":
train_scenes = splits.train
# random.shuffle(train_scenes)
# train_scenes = train_scenes[:int(len(train_scenes)*0.2)]
val_scenes = splits.val
elif version == "v1.0-test":
train_scenes = splits.test
val_scenes = []
elif version == "v1.0-mini":
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise ValueError("unknown")
test = "test" in version
root_path = Path(root_path)
# filter exist scenes. you may only download part of dataset.
available_scenes = _get_available_scenes(nusc)
available_scene_names = [s["name"] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set(
[
available_scenes[available_scene_names.index(s)]["token"]
for s in train_scenes
]
)
val_scenes = set(
[available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes]
)
if test:
print(f"test scene: {len(train_scenes)}")
else:
print(f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")
train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
nusc, train_scenes, val_scenes, test, nsweeps=nsweeps
)
if test:
print(f"test sample: {len(train_nusc_infos)}")
with open(
root_path / "infos_test_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
else:
print(
f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
)
with open(
root_path / "infos_train_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(train_nusc_infos, f)
with open(
root_path / "infos_val_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
) as f:
pickle.dump(val_nusc_infos, f)
def get_box_mean(info_path, class_name="vehicle.car"):
with open(info_path, "rb") as f:
nusc_infos = pickle.load(f)
gt_boxes_list = []
for info in nusc_infos:
mask = np.array([s == class_name for s in info["gt_names"]], dtype=np.bool_)
gt_boxes_list.append(info["gt_boxes"][mask].reshape(-1, 7))
gt_boxes_list = np.concatenate(gt_boxes_list, axis=0)
print(gt_boxes_list.mean(0))
def eval_main(nusc, eval_version, res_path, eval_set, output_dir):
# nusc = NuScenes(version=version, dataroot=str(root_path), verbose=True)
cfg = config_factory(eval_version)
nusc_eval = NuScenesEval(
nusc,
config=cfg,
result_path=res_path,
eval_set=eval_set,
output_dir=output_dir,
verbose=True,
)
metrics_summary = nusc_eval.main(plot_examples=10,)
| StarcoderdataPython |
196245 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mywidget.ui'
#
# Created by: PyQt5 UI code generator 5.8.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MyWidget:
def setupUi(self, MyWidget):
MyWidget.setObjectName("MyWidget")
MyWidget.resize(226, 144)
self.verticalLayout = QtWidgets.QVBoxLayout(MyWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(MyWidget)
self.groupBox.setObjectName("groupBox")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout.setObjectName("gridLayout")
self.label1 = QtWidgets.QLabel(self.groupBox)
self.label1.setObjectName("label1")
self.gridLayout.addWidget(self.label1, 0, 0, 1, 1)
self.hLineEdit = QtWidgets.QLineEdit(self.groupBox)
self.hLineEdit.setObjectName("hLineEdit")
self.gridLayout.addWidget(self.hLineEdit, 0, 1, 1, 1)
self.label3 = QtWidgets.QLabel(self.groupBox)
self.label3.setObjectName("label3")
self.gridLayout.addWidget(self.label3, 0, 2, 1, 1)
self.label2 = QtWidgets.QLabel(self.groupBox)
self.label2.setObjectName("label2")
self.gridLayout.addWidget(self.label2, 1, 0, 1, 1)
self.wLineEdit = QtWidgets.QLineEdit(self.groupBox)
self.wLineEdit.setObjectName("wLineEdit")
self.gridLayout.addWidget(self.wLineEdit, 1, 1, 1, 1)
self.label4 = QtWidgets.QLabel(self.groupBox)
self.label4.setObjectName("label4")
self.gridLayout.addWidget(self.label4, 1, 2, 1, 1)
self.verticalLayout.addWidget(self.groupBox)
self.hLayout = QtWidgets.QHBoxLayout()
self.hLayout.setObjectName("hLayout")
self.label5 = QtWidgets.QLabel(MyWidget)
self.label5.setObjectName("label5")
self.hLayout.addWidget(self.label5)
self.resComboBox = QtWidgets.QComboBox(MyWidget)
self.resComboBox.setObjectName("resComboBox")
self.resComboBox.addItem("")
self.resComboBox.addItem("")
self.resComboBox.addItem("")
self.hLayout.addWidget(self.resComboBox)
self.verticalLayout.addLayout(self.hLayout)
self.resLabel = QtWidgets.QLabel(MyWidget)
self.resLabel.setObjectName("resLabel")
self.verticalLayout.addWidget(self.resLabel)
self.retranslateUi(MyWidget)
QtCore.QMetaObject.connectSlotsByName(MyWidget)
def retranslateUi(self, MyWidget):
_translate = QtCore.QCoreApplication.translate
MyWidget.setWindowTitle(_translate("MyWidget", "PPT calc"))
self.groupBox.setTitle(_translate("MyWidget", "Video size in Powerpoint"))
self.label1.setText(_translate("MyWidget", "Height"))
self.hLineEdit.setText(_translate("MyWidget", "4.45"))
self.label3.setText(_translate("MyWidget", "cm"))
self.label2.setText(_translate("MyWidget", "Width"))
self.wLineEdit.setText(_translate("MyWidget", "6.78"))
self.label4.setText(_translate("MyWidget", "cm"))
self.label5.setText(_translate("MyWidget", "Target resolution"))
self.resComboBox.setItemText(0, _translate("MyWidget", "1024x768 (4:3)"))
self.resComboBox.setItemText(1, _translate("MyWidget", "1280x1024 (5:4)"))
self.resComboBox.setItemText(2, _translate("MyWidget", "1920x1200 (16:10)"))
self.resLabel.setText(_translate("MyWidget", "RESULT LINE"))
| StarcoderdataPython |
3240716 | <reponame>farewell12345/UnsplashDownloader
access_token = []  # your Unsplash access keys (accessKey)
url = 'https://api.unsplash.com'
keyword = '/search/photos/'
tags = ['girl', 'boy', 'Portrait', 'lady', 'man', 'woman']  # tags to crawl; add more entries following the same format
download_pages = 20  # maximum number of pages to download; keep it modest or the run may take a long time
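# Sketch of how these settings are typically consumed (assumes the `requests`
# package and the standard Unsplash search parameters `query`, `page` and
# `client_id`; adapt to the actual downloader code):
#   import requests
#   resp = requests.get(url + keyword, params={
#       'query': tags[0],
#       'page': 1,
#       'client_id': access_token[0],
#   })
#   photos = resp.json()['results']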
| StarcoderdataPython |
4810966 | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
import argparse
import logging
import os
import random
from datetime import datetime
from pathlib import Path
import torch
import datasets
from datasets import load_from_disk, load_metric
from torch.utils.data import DataLoader
import transformers
from accelerate import Accelerator, DistributedDataParallelKwargs
from huggingface_hub import Repository
from transformers import (
AutoTokenizer,
set_seed,
AutoConfig,
AutoModelForSequenceClassification,
DataCollatorWithPadding
)
from transformers.file_utils import get_full_repo_name
from transformers.utils.versions import require_version
from utils import custom_tokenize, load_args, path_adder, preprocess_function, MODEL_MAPPING, select_base
from data_collator import CustomDataCollator
from models import HierarchicalClassificationModel
from model_utils import copy_proj_layers, pretrained_masked_model_selector, pretrained_model_selector, pretrained_sequence_model_selector
from longformer import get_attention_injected_model
logger = logging.getLogger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
def parse_args():
    parser = argparse.ArgumentParser(description="Evaluate the fine-tuned hierarchical model on a text classification task")
parser.add_argument(
"--test_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
# Modified
"--max_seq_length",
type=int,
default=None,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
# Modified
"--finetuned_dir",
type=str,
help="Path to the output directory of finetuning.",
required=True,
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
# Modified:
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--max_document_length",
type=int,
default=None,
required=True,
help="The maximum number of sentences each document can have. Documents are either truncated or"
"padded if their length is different.",
)
parser.add_argument(
"--custom_model",
type=str,
help="If a custom model is to be used, the model type has to be specified.",
default=None,
choices=["hierarchical", "sliding_window", "longformer"]
)
args = parser.parse_args()
# Sanity checks
if args.test_file is None:
raise ValueError("Need testing file.")
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
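# Example invocation (illustrative paths; the script file name is an assumption):
#   accelerate launch evaluate_hierarchical.py \
#       --test_file data/test_dataset \
#       --finetuned_dir outputs/finetuned_run \
#       --custom_model hierarchical \
#       --max_document_length 32 \
#       --per_device_eval_batch_size 8 \
#       --output_dir outputs/eval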
def main():
# Modified: classification arguments
args = parse_args()
# TODO: change the logic
# Argments from pretraining
if args.custom_model == "hierarchical":
pretrained_args = load_args(os.path.join(args.finetuned_dir, "pretrained_args.json"))
args.use_sliding_window_tokenization = getattr(pretrained_args , "use_sliding_window_tokenization", False)
elif args.custom_model == "sliding_window":
args.use_sliding_window_tokenization = True
finetuned_args = load_args(os.path.join(args.finetuned_dir, "args.json"))
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
# Modified: output_dir is concatanated with datetime and command line arguments are also saved
# TODO: refactor
if args.custom_model == "hierarchical":
inter_path = path_adder(pretrained_args, finetuning=True, custom_model=args.custom_model)
else:
inter_path = path_adder(finetuned_args, finetuning=True)
inter_path += datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
args.output_dir = os.path.join(args.output_dir, inter_path)
os.makedirs(args.output_dir, exist_ok=True)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
# Modified
handlers=[
logging.FileHandler(os.path.join(args.output_dir, "loginfo.log")),
logging.StreamHandler()
]
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
accelerator.wait_for_everyone()
# Modified:
test_dataset = load_from_disk(args.test_file)
# Labels
label_list = test_dataset.unique("labels")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
if args.custom_model == "longformer":
tokenizer = AutoTokenizer.from_pretrained(
args.finetuned_dir,
max_length=args.max_seq_length,
padding="max_length",
truncation=True,
)
else:
tokenizer = AutoTokenizer.from_pretrained(args.finetuned_dir,
use_fast=True)
if args.custom_model in ("hierarchical", "sliding_window"):
model = HierarchicalClassificationModel(c_args=finetuned_args,
args=None if args.custom_model == "sliding_window" else pretrained_args,
tokenizer=tokenizer,
num_labels=num_labels)
model.load_state_dict(torch.load(os.path.join(args.finetuned_dir, "model.pth")))
elif args.custom_model == "longformer":
psm = pretrained_sequence_model_selector(select_base(args.finetuned_dir))
model = get_attention_injected_model(psm)
model = model.from_pretrained(
args.finetuned_dir,
max_length=args.max_seq_length,
num_labels=num_labels
)
else:
config = AutoConfig.from_pretrained(args.finetuned_dir, num_labels=num_labels)
model = AutoModelForSequenceClassification.from_pretrained(
args.finetuned_dir,
config=config,
)
if args.custom_model in ("hierarchical", "sliding_window"):
with accelerator.main_process_first():
# Modified
test_dataset = test_dataset.rename_column("text", "article_1")
ARTICLE_NUMBERS = 1
test_dataset = test_dataset.map(
custom_tokenize,
fn_kwargs={"tokenizer": tokenizer, "args": args, "article_numbers": ARTICLE_NUMBERS},
num_proc=args.preprocessing_num_workers,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
else:
with accelerator.main_process_first():
test_dataset = test_dataset.map(
preprocess_function,
fn_kwargs={"tokenizer": tokenizer, "max_seq_length": args.max_seq_length},
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=test_dataset.column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
# Modified
# Log a few random samples from the training set:
for index in random.sample(range(len(test_dataset)), 3):
logger.info(f"Sample {index} of the training set: {test_dataset[index]}.")
if args.custom_model in ("hierarchical", "sliding_window"):
ARTICLE_NUMBERS = 1
data_collator = CustomDataCollator(tokenizer=tokenizer,
max_sentence_len=pretrained_args.max_seq_length if args.max_seq_length is None else args.max_seq_length,
max_document_len=pretrained_args.max_document_length if args.max_document_length is None else args.max_document_length,
article_numbers=ARTICLE_NUMBERS,
consider_dcls=True if args.custom_model == "hierarchical" else False)
elif args.custom_model == "longformer":
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=512)
else:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
test_dataloader = DataLoader(test_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Prepare everything with our `accelerator`.
model, test_dataloader = accelerator.prepare(
model, test_dataloader
)
# Modified: only accuracy.
# Get the metric function
metric = load_metric("accuracy")
model.eval()
for batch in test_dataloader:
# Modified for Hierarchical Classification Model
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
metric.add_batch(
predictions=accelerator.gather(predictions),
references=accelerator.gather(batch["labels"]),
)
eval_metric = metric.compute()
logger.info(f"final accuracy: {eval_metric}")
if __name__ == "__main__":
main()
| StarcoderdataPython |
1643236 | <reponame>UMKC-BigDataLab/DiSC
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME>
# May 23, 2017
#
# Usage: ./cal-lost-packets.py -h
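#
# The script only counts lines containing the substrings 'DiSC Data Sending'
# and 'received DiSC'; illustrative (made-up) log lines:
#   [node-1] 12:00:01 DiSC Data Sending seq=42
#   [node-2] 12:00:01 received DiSC seq=42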
import argparse
import sys
import time
def eprint(obj):
sys.stderr.write(obj)
sys.stderr.write('\n')
def main(args):
filename = args.logfile
sentCount = 0.0
rcvdCount = 0.0
sTime = time.time()
logfile = open(filename,'r')
for line in logfile:
if 'received DiSC' in line:
rcvdCount += 1
elif 'DiSC Data Sending' in line:
sentCount += 1
eTime = time.time()
logfile.close()
ratio = round(100*(1 - (rcvdCount/sentCount)),2)
print("Number of packtes sent: "+str(int(sentCount)) )
print("Number of packtes received: "+str(int(rcvdCount)) )
print("Packet Loss Rate: " + str(ratio) +"%")
#eprint("Log processed in "+str(round(eTime - sTime,3)) + 's')
#eprint("Done.")
if __name__ == "__main__":
prog_desc = "Read DiSC log file and count the number of sent and "\
"received DiSC packets."
parser = argparse.ArgumentParser(description = prog_desc, add_help=False)
parser.add_argument_group('required arguments')
# Add a required argument
parser.add_argument('logfile', help='log_file is the DiSC log.',
metavar=('<log_file>'), type=str)
# Help argument
parser.add_argument('-h','--help',action='help',default=argparse.SUPPRESS,
help=argparse._('show this help message and exit.'))
# Rename the arguements' title. The default (i.e. positional arguments)
# is a bit confusing to users.
parser._positionals.title = "required arguments"
args = parser.parse_args()
main(args)
| StarcoderdataPython |
1633427 | #!/usr/bin/env python
# coding: utf-8
"""This script pulls business entity data from the workflow database and generates markdown
files for use as training data in the CRF NLP process. """
import configparser
import math
import psycopg2
import psycopg2.extras
from datetime import date
import datetime
def dbconnect():
"""Connect to the validation database"""
config = configparser.ConfigParser()
config.read('../hitl.config')
host = config.get('POSTGRES', 'Host')
user = config.get('POSTGRES', 'User')
pw = config.get('POSTGRES', 'Password')
port = config.get('POSTGRES', 'Port')
dbs = config.get('POSTGRES', 'Database')
#fire up the database connection
conn = psycopg2.connect("dbname='{0}' user='{1}' host='{2}' password='{3}'".format(dbs, user, host, pw))
cur = conn.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
return cur, conn
def getEntities():
"""Get business listing entity data and their parent annotation ids from the db"""
db = dbconnect()
cur = db[0]
conn = db[1]
sql = """select a."id" as "annotation_id", tv."key", tv."value", c.*
from "Text_Value" tv
join "Coordinates" c
on tv."coordinates_id" = c."id"
join "Annotation" a
on c."annotation_id" = a."id"
where a."subject_type" = 'business listing entities'
and "cs_task_id" is not null
order by a."id";"""
try:
cur.execute(sql,)
except:
conn.rollback()
cur.execute(sql)
rows = cur.fetchall()
return rows
def entitiesByListing(entities):
"""Group entities from database by annotation id"""
groups = {}
for e in entities:
if e['annotation_id'] not in groups:
groups[e['annotation_id']] = []
entity = {}
for k, v in e.items():
if k != ['annotation_id']:
entity[k] = v
groups[e['annotation_id']].append(entity)
else:
entity = {}
for k, v in e.items():
if k != ['annotation_id']:
entity[k] = v
groups[e['annotation_id']].append(entity)
grouped = []
for g, v in groups.items():
new_group = {'annotation_id':g,
'entities':v}
grouped.append(new_group)
return grouped
def sortBoxes(boxes):
"""Calculate coordinates for each box and sort in reading order"""
for ent in boxes:
ent['xmin'] = float(ent['x'])
ent['ymin'] = float(ent['y'])
ent['width'] = float(ent['width'])
ent['height'] = float(ent['height'])
ent['xmax'] = ent['xmin'] + ent['width']
ent['ymax'] = ent['ymin'] + ent['height']
num_boxes = len(boxes)
# sort from top to bottom and left to right
sorted_boxes = sorted(boxes, key=lambda x: (x['ymin'], x['xmin']))
_boxes = list(sorted_boxes)
# for j in range:
    # check if the next neighbour box x coordinate is greater than the current box x coordinate; if not, swap them.
    # repeat the swapping process up to a threshold number of iterations, and also select the threshold
#MAY NEED TO ADJUST THIS THRESHOLD
threshold_value_y = 25
for i in range(5):
for i in range(num_boxes - 1):
if abs(_boxes[i + 1]['ymin'] - _boxes[i]['ymin']) < threshold_value_y and (_boxes[i + 1]['xmin'] < _boxes[i]['xmin']):
tmp = _boxes[i]
_boxes[i] = _boxes[i + 1]
_boxes[i + 1] = tmp
return _boxes
def toMarkdown(entities):
"""Convert entities to markdown"""
md_string = []
for e in entities:
if e['key'] != 'graphic':
ent = '- [{0}]({1})'.format(e['value'], e['key'].replace(' ', '_'))
md_string.append(ent)
if len(md_string) > 2:
md_string = ' '.join(md_string)
else:
md_string = None
return md_string
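# Example of the markdown produced by toMarkdown() for one listing (entity
# values and keys below are hypothetical):
#   - [ACME Hardware](business_name) - [123 Main St.](street_address) - [555-0100](phone_number)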
def getNewVersion():
"""Create a new ML version in ML_Version table and return the version number"""
db = dbconnect()
cur = db[0]
conn = db[1]
#get latest version number
sql1 = """select max(version_number) as vn
from "ML_Version"
where ml_process_id = 5;
"""
try:
cur.execute(sql1,)
except:
conn.rollback()
cur.execute(sql1,)
rows = cur.fetchall()
#if no existing version, version number is 1.0.0, else version number + 1
if rows[0]['vn'] is None:
vn = '1.0.0'
else:
vn = str(int(rows[0]['vn'].split('.')[0]) + 1) + '.0.0'
#create a new version in the ML_Version table with the new version number
cur = db[0]
conn = db[1]
sql2 = """insert into "ML_Version" (ml_process_id, version_number, date_time)
values (5, %s, %s)
returning id;"""
data = (vn, datetime.datetime.utcnow(),)
try:
cur.execute(sql2, data)
except:
conn.rollback()
cur.execute(sql2, data)
rows = cur.fetchall()
conn.commit()
id = rows[0]['id']
return (id, vn)
def writeToTrainingDataset(filename, vn):
"""Write dataset path and ml_version_id to Training_Dataset table"""
db = dbconnect()
cur = db[0]
conn = db[1]
sql = """insert into "Training_Dataset" (ml_version_id, path)
values (%s, %s)
returning id;"""
data = (vn, filename,)
try:
cur.execute(sql, data)
except:
conn.rollback()
cur.execute(sql, data)
rows = cur.fetchall()
conn.commit()
def writeToTrain(annotation_id, vn):
"""Write annotation_id and ml_version_id for each annotation in the training set to Train table"""
db = dbconnect()
cur = db[0]
conn = db[1]
sql = """insert into "Train" (ml_version_id, annotation_id)
values (%s, %s)
returning id;"""
data = (vn, annotation_id,)
try:
cur.execute(sql, data)
except:
conn.rollback()
cur.execute(sql, data)
rows = cur.fetchall()
conn.commit()
def outputMarkdown(entities, filename):
"""Output markdown text as a markdown file"""
f = open(filename, 'w')
for e in entities:
if e['train'] is not None:
f.write(e['train'] + '\n')
f.close()
if __name__ == "__main__":
#get entities from database
entities = getEntities()
grouped_ents = entitiesByListing(entities)
for e in grouped_ents:
sorted_ents = sortBoxes(e['entities'])
e['train'] = toMarkdown(sorted_ents)
#get latest version number and insert into ML_Version, returning id and version number (x.0.0)
version = getNewVersion()
filename = '../training/CRF_training_data_{}.md'.format(version[1])
#write dataset into Training_Dataset table
path = filename.replace('../', '')
writeToTrainingDataset(path, version[0])
#write annotations with ml_version_id to Train table
for e in grouped_ents:
if e['train'] is not None:
writeToTrain(e['annotation_id'], version[0])
#output md file
    outputMarkdown(grouped_ents, filename)
| StarcoderdataPython |
3265074 | <reponame>mvonbun/python-script-templates<filename>python_pgfplots.py
import numpy as np
def writeVector(fid, npVector, prefix='', fmt='%.8e'):
"""Write (append) numpy vector to a text file for use as PGF tables."""
np.savetxt(fid, npVector, fmt=fmt, newline=' ',
header=prefix, comments='')
fid.write("\n")
| StarcoderdataPython |
4804263 | """
Suppose that we analyze an algorithm and decide that it has the following relationship between the input size, n, and the number of operations needed to carry out the algorithm:
N = n^2 + 5
Where n is the input size and N is the number of operations required.
For example, if we gave this algorithm an input of 2, the number of required operations would be 2^2 + 5, or simply 9.
The thing to notice in the above exercise is this: in n^2 + 5, the 5 has very little impact on the total efficiency, especially as the input size gets larger and larger. Asking the computer to do 10,005 operations vs. 10,000 operations makes little difference. Thus, it is the n^2
that we really care about the most, and the + 5 makes little difference.
Most of the time, when analyzing the efficiency of an algorithm, the most important thing to know is the order.
In other words, we care a lot whether the algorithm's time-complexity has a linear order or a quadratic order (or some other order).
This means that very often (in fact, most of the time) when you are asked to analyze an algorithm, you can do so by making an approximation that significantly simplifies things.
In this next video, Brynn will discuss this concept and show how it's used with Big O Notation.
https://youtu.be/zOenWuEDhFo
"""
| StarcoderdataPython |
3379789 | import os
try:
from setuptools import setup
extra = dict(entry_points={
'console_scripts': ['cluster-explore=cluster_config.explore:main',
'cluster-generate=cluster_config.generate:main',
'cluster-push=cluster_config.push:main',
'cluster-generate-push=cluster_config.generate_push:main']
})
except ImportError:
from distutils.core import setup
extra = dict(scripts=["cluster-explore","cluster-generate","cluster-push", "cluster-generate-push"])
setup(
# Application name:
name="cluster_config",
# Version number (initial):
version=u"0.1.0",
# Application author details:
# Packages
packages=["cluster_config","cluster_config/cdh","cluster_config/tests", "cluster_config/utils"],
# Include additional files into the package
include_package_data=True,
# Details
#url="https://analyticstoolkit.intel.com",
#
license="Apache 2.0",
description="big data cluster configuration tool",
long_description=open("README.md").read(),
# Dependent packages (distributions)
install_requires=[
'argparse >= 1.3.0',
'cm-api >= 9.0.0',
'pyyaml >= 3.11'
],
**extra
)
| StarcoderdataPython |
1647177 | <gh_stars>0
import yaml
import subprocess
import time
import random
import string
import base64
import sys
try:
postfix= sys.argv[1]
except:
print('Please provide postfix that was generated when cluster was created')
sys.exit(1)
if postfix == None:
print('Please provide postfix that was generated when cluster was created')
sys.exit(1)
cluster_name = 'swir-demo-cluster-'+postfix
#https://github.com/aws/containers-roadmap/issues/632
autoscale_group_name='swir-demo-autoscaling-group-'+postfix
capacity_provider_name = 'swir-demo-capacity-provider-'+postfix
launch_configuration = 'swir-demo-launch-configuration-' + postfix
print('Deleting cluster name ' + cluster_name)
print('Deleting autoscaling group name ' + autoscale_group_name)
print('Deleting launch configuration name ' + launch_configuration)
services = ['incoming','orders','inventory','billing','shipments']
tasks = ['swir-order-generator','swir-order-processor','swir-inventory-processor','swir-billing-processor','swir-shipments-sink']
files = ['swir-order-generator-task.yaml','swir-order-processor-task.yaml','swir-inventory-processor-task.yaml','swir-billing-processor-task.yaml','swir-shipment-sink-task.yaml']
for i,service in enumerate(services):
file_name = files[i]
task = tasks[i]
print("Deleting service " + service + " " + task + " " + file_name);
print("Deleting service " + service)
try:
subprocess.check_output('aws ecs update-service --cluster ' + cluster_name + ' --service ' + service + ' --desired-count=0',shell=True)
subprocess.check_output('aws ecs delete-service --cluster ' + cluster_name + ' --service ' + service,shell=True)
except:
print('Problem with service ' + service)
output = subprocess.check_output('aws ecs list-tasks --output yaml --cluster ' + cluster_name,shell=True)
data_loaded = yaml.safe_load(output)
active_tasks = data_loaded['taskArns']
for at in active_tasks:
taskArn = at
print("Deleting task " + at)
subprocess.check_output('aws ecs stop-task --cluster ' + cluster_name + ' --task ' + taskArn,shell=True )
output = subprocess.check_output('aws ecs describe-task-definition --output yaml --task-definition ' + task,shell=True)
data_loaded = yaml.safe_load(output)
taskArn = data_loaded['taskDefinition']['taskDefinitionArn']
print("Deregistering task definition " + taskArn)
subprocess.check_output('aws ecs deregister-task-definition --task-definition ' + taskArn,shell=True)
subprocess.call('aws autoscaling delete-auto-scaling-group --force-delete --auto-scaling-group-name ' + autoscale_group_name,shell=True)
subprocess.call('aws autoscaling delete-launch-configuration --launch-configuration-name ' + launch_configuration,shell=True)
is_active = False
while not is_active:
print('Describe cluster');
output = subprocess.check_output('aws ecs describe-clusters --output yaml --cluster ' + cluster_name,shell=True)
data_loaded = yaml.safe_load(output)
registered_instances = data_loaded['clusters'][0]['registeredContainerInstancesCount']
is_active = (registered_instances==0)
print('Cluster status '+ data_loaded['clusters'][0]['status'] + ' registered instances ' + str(registered_instances));
time.sleep(5)
subprocess.check_output('aws ecs delete-cluster --cluster ' + cluster_name,shell=True)
| StarcoderdataPython |
4833428 | from impactutils.extern.openquake import geodetic
class Point(object):
"""Simple point class to contain lat/lon/depth values."""
def __init__(self,longitude,latitude,depth=0):
"""Create a Point object.
Args:
longitude (float): Longitude of a point.
latitude (float): Latitude of a point.
depth (float): Depth (km) of a point.
"""
self.longitude = longitude
self.latitude = latitude
self.depth = depth
@property
def x(self):
"""Access the longitude of a point.
Returns:
float: Longitude value.
"""
return self.longitude
@property
def y(self):
"""Access the latitude of a point.
Returns:
float: Latitude value.
"""
return self.latitude
@property
def z(self):
"""Access the depth of a point.
Returns:
float: Depth value.
"""
return self.depth
def azimuth(self,point):
"""Get the angle (in degrees) between two points.
Args:
point (Point): Point object.
Returns:
float: Azimuth angle in degrees between this Point and input Point.
"""
return geodetic.azimuth(self.longitude, self.latitude,
point.longitude, point.latitude)
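if __name__ == "__main__":
    # Minimal usage sketch (assumes impactutils and its bundled openquake
    # geodetic module are installed): azimuth from one point to another.
    p1 = Point(-122.0, 37.0)
    p2 = Point(-121.0, 38.0)
    print("azimuth p1 -> p2: %.2f degrees" % p1.azimuth(p2))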
| StarcoderdataPython |
80349 | <reponame>QiliangFan/Baidu-Curve
# -*- coding: utf-8 -*-
"""
Curve
~~~~
db package
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
import flask_sqlalchemy
db = flask_sqlalchemy.SQLAlchemy()
| StarcoderdataPython |
3213880 | # -*- coding: utf-8 -*-
"""Define custom errors for this app."""
class Error(Exception):
"""Global error for this app. If we catch this, we will cath all childs.
Usage::
from errors import Error as PostsError
try:
# action on posts
except PostsError:
            # Handle all errors from posts
"""
pass
class InvalidDateOrder(Error):
"""When date of end is before date of start."""
pass
class InvalidCategories(Error):
"""When choosen categories does not match to same group.
This should not occur, as they should be filtered in front end. But anyway we
should take care of this.
"""
def __init__(self, parent, son):
self.parent = parent
self.son = son
def __str__(self):
        return 'Chosen categories do not belong together: {}, {}'.format(
self.parent, self.son,
)
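if __name__ == "__main__":
    # Minimal sketch (illustrative values): catching the base Error also
    # catches every child error defined above.
    try:
        raise InvalidCategories("Food", "Rock music")
    except Error as exc:
        print("caught:", exc)
    try:
        raise InvalidDateOrder()
    except Error as exc:
        print("caught:", type(exc).__name__)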
| StarcoderdataPython |
110375 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import logging
import sys
import boto3
import research_pacs.shared.dicom_util as rpacs_dicom_util
import research_pacs.shared.util as rpacs_util
from research_pacs.de_identifier.dicom import DicomDeidentifier
from research_pacs.de_identifier.env import get_env
from research_pacs.de_identifier.ocr import get_box_coordinates
from research_pacs.shared.database import DB, DBKeyJsonValue, DBDicomMapping
from research_pacs.shared.orthanc import OrthancClient
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
env = None
client = None
def main():
logger.info('Starting de-identifier')
try:
global env
env = get_env()
# Create the clients
global client
client = rpacs_util.ClientList()
client.add('db', DB(env.pg_host, env.pg_port, env.pg_user, env.pg_pwd, env.pg_db))
client.add('db_msg', DBKeyJsonValue(db_client=client.db, table_name="rpacs_related_msg"))
client.add('db_mapping', DBDicomMapping(db_client=client.db))
client.add('src_orthanc', OrthancClient(env.src_orthanc_host, env.src_orthanc_user, env.src_orthanc_pwd))
client.add('dst_orthanc', OrthancClient(env.dst_orthanc_host, env.dst_orthanc_user, env.dst_orthanc_pwd))
client.add('sqs', boto3.client('sqs', region_name=env.region))
# Exit if any of the previous steps failed
except Exception as e:
logger.fatal(f'Failed to initialize the program - {e}')
sys.exit(1)
# Loop until the program is interrupted
killer = rpacs_util.GracefulKiller()
while not killer.kill_now:
# Retrieve up to 10 messages from the SQS queue
try:
messages_returned = False
logger.debug(f'Retrieving messages from the SQS queue')
sqs_response = client.sqs.receive_message(
QueueUrl=env.queue_url,
AttributeNames=['ApproximateReceiveCount'],
MaxNumberOfMessages=10,
MessageAttributeNames=['All'],
VisibilityTimeout=env.queue_timeout,
WaitTimeSeconds=1
)
# Process each message and delete it from the queue if it succeeded
messages = sqs_response['Messages'] if 'Messages' in sqs_response else []
logger.debug(f'SQS returned {len(messages)} messages to process')
if len(messages) > 0:
messages_returned = True
for message in messages:
try:
# Delete the message if it was served more than `queue_max_attemps`
nb_attempts = int(message['Attributes']['ApproximateReceiveCount'])
if nb_attempts > env.queue_max_attemps:
client.sqs.delete_message(QueueUrl=env.queue_url, ReceiptHandle=message['ReceiptHandle'])
continue
process_message(message['Body'])
client.sqs.delete_message(QueueUrl=env.queue_url, ReceiptHandle=message['ReceiptHandle'])
except Exception as e:
logger.error(f'Failed to process the message ({nb_attempts} attempts) - {e}')
except Exception as e:
logger.error(f'Failed to poll messages from SQS - {e}')
# Close the DB connection after each iteration
client.db.close()
# Wait 5 seconds if the previous request returned no SQS message
if messages_returned is False:
logger.debug(f"Waiting 5 seconds")
killer.sleep(5)
# Before the program exits
logger.info('Stopping de-identifier')
def process_message(msg_str):
"""
Pass the message to another function depending on the event type (e.g. NewDICOM)
Args:
msg_str (str): Content of the SQS message
"""
try:
msg = json.loads(msg_str)
logger.debug(f"New message: {json.dumps(msg)}")
event_type = msg['EventType']
except:
logger.error(f'Skipping malformed message: {msg_str}')
return
"""
Message received when a new DICOM file must be de-identified and sent to the destination
Orthanc server.
Format : {
"EventType": "NewDICOM"
"Source": Location of the DICOM file, either from Orthanc (orthanc://instance-id),
from Amazon S3 (s3://bucket/key), or from a local file system
"ConfigFile": [Optional] Location of a custom config file to use to de-identify this
DICOM file. Use the default config file is no value is provided
"Destination": [Optional] Location where the de-identified DICOM file should be sent:
s3://bucket/key for Amazon S3, or /folder/file.ext for a local file. If
no value is provided, the file is sent to the destination Orthanc server
"LogFile": [Optional] If the destination is S3 or a local file, set this to `True` to
write a file with detailed de-identification steps, or the error message
if the de-identification process failed
"Skip": [Optional] Skip this file is `True`. Default is `False`
"Retry": [Optional] Set to `False` to not retry if the message processing failed
}
"""
if event_type == 'NewDICOM':
try:
process_new_dicom(msg)
except Exception as e:
if not ('Retry' in msg and msg['Retry'] == False):
raise e
def process_new_dicom(msg):
"""
Process incoming DICOM files as follows:
If the new DICOM file is stored in Amazon S3 or a file system that is locally mounted to this
server, we send it to Orthanc so that we can leverage Orthanc native API for some of the
de-identification parts. A new SQS message will be trigger the de-identification for the
Orthanc instance.
If the new DICOM file is stored in the source Orthanc server, check if the Orthanc instance is
associated with previous "instructions" (see below) and overwrite the current message if needed.
Then, process the new Orthanc DICOM instance with the function `process_new_dicom_orthanc` and
delete the instance from the source Orthanc server if the processing succeeded.
Args:
msg (dict): Content of the SQS message
"""
try:
dicom_source = msg['Source']
logger.info(f"New DICOM file: Source={dicom_source}")
except:
logger.error(f'Attribute "Source" is missing in the message')
return
# If the DICOM instance is stored in a local file system or S3, upload it to Orthanc and store
# the original message in the database. The change pooler will detect that new Orthanc instance
# and send another message. That new message will be replaced by the "instructions" contained in
# the previous message, such as using a custom config file
if not dicom_source.startswith('orthanc://'):
try:
dicom_file = rpacs_util.load_file(dicom_source, env.region, 'bytes')
instance_id = client.src_orthanc.upload_instance(dicom_file)
client.db_msg.upsert(instance_id, msg)
logger.info(f"Uploaded the local DICOM file to Orthanc - Instance ID={instance_id}")
except Exception as e:
raise Exception(f'Failed to upload the local DICOM file to Orthanc - {e}')
# If the DICOM file is stored in the source Orthanc server
else:
instance_id = dicom_source.replace('orthanc://', '')
# Check if a message was previously stored in the database, with "instructions" on how to
# process this Orthanc instance
try:
previous_msg = client.db_msg.get(instance_id)
if previous_msg != None:
if 'Source' in previous_msg:
previous_msg['OriginalSource'] = previous_msg['Source']
previous_msg['Source'] = dicom_source
msg = previous_msg
logger.debug(f"Modified the message: {json.dumps(previous_msg)}")
except Exception as e:
raise Exception(f'Failed to check if a related message was previously stored in the database - {e}')
# Skip the message if it has an attribute `Skip=True` and delete the associated Orthanc
# instance, because it was uploaded by the de-identifier
if 'Skip' in msg and msg['Skip'] == True:
logger.info(f'Skipping the Orthanc instance (Skip=True)')
client.src_orthanc.delete_instance(instance_id)
# Otherwise, process the message and delete the original DICOM file in Orthanc unless
# we need to preserve them
else:
process_new_dicom_orthanc(instance_id, msg)
if env.preserve_files.lower() == 'no':
client.src_orthanc.delete_instance(instance_id)
def process_new_dicom_orthanc(src_instance_id, msg):
"""
Process new DICOM instances stored in the source Orthanc server as follows:
- Download the configuration file how original DICOM files are processed
- De-identify the DICOM file uing the function `deidentify_dicom_orthanc`
- Write the de-identified DICOM file to the destination (destination Orthanc server, S3 or local)
- Write detailed logs to S3 or file, if needed
Args:
src_instance_id (str): Instance ID in the source Orthanc server
msg (dict): Content of the SQS message
"""
err = None
logs = {'Message': msg}
try:
# Load the config file, either from the custom location passed in the message or from the
# default location, and create a DicomDeidentifier object
try:
if 'ConfigFile' in msg:
config_location = msg['ConfigFile']
logger.info(f'Using a custom config file "{config_location}"')
else:
config_location = env.config_file
logger.debug(f'Loading the config file at "{config_location}"')
logs['ConfigFile'] = config_location
config = rpacs_util.load_file(config_location, env.region, 'yaml')
except Exception as e:
raise Exception(f'Failed to download the config file - {e}')
# Download the original DICOM file
try:
logger.debug('Loading the original DICOM file from Orthanc')
src_dicom = client.src_orthanc.download_instance_dicom(src_instance_id)
except Exception as e:
raise Exception(f'Failed to download the original DICOM file from Orthanc - {e}')
# De-identify the DICOM file
logger.debug('De-identifying the original DICOM file from Orthanc')
dst_dicom = deidentify_dicom_orthanc(src_instance_id, src_dicom, config, logs)
# Send the de-identified DICOM file to the destination, if the call is `deidentify_dicom`
# returned a DICOM file (the file might be skipped based on its labels)
try:
if dst_dicom != None:
if 'Destination' in msg:
rpacs_util.write_file(dst_dicom, msg['Destination'], env.region, 'bytes')
logger.info(f"Uploaded the de-identified DICOM file to \"{msg['Destination']}\"")
else:
dst_instance_id = client.dst_orthanc.upload_instance(dst_dicom)
logger.info(f"Uploaded the de-identified DICOM file to Orthanc - ID={dst_instance_id}")
except Exception as e:
raise Exception(f'Failed the write the de-identified DICOM file - {e}')
except Exception as e:
logger.error(f'Failed to process the DICOM file - {e}')
logs.update({'Error': str(e)})
err = e
# Print the result logs to the screen
for key, value in logs.items():
if key == 'Message':
continue
elif key == 'TransformationsApplied':
for t_key, t_value in value.items():
logger.info(f'Result: {key} {t_key}={json.dumps(t_value)}')
else:
logger.info(f'Result: {key}={json.dumps(value)}')
# Upload the detailed logs
if 'LogFile' in msg:
try:
rpacs_util.write_file(logs, msg['LogFile'], env.region, 'json')
logger.info(f"Uploaded the detailed logs to \"{msg['LogFile']}\"")
except Exception as e:
logger.error(f'Failed to upload the log file - {e}')
# Raise the exception err if it was catched earlier
if err != None:
raise err
def deidentify_dicom_orthanc(instance_id, src_dicom, config, logs):
"""
De-identify a DICOM instance from in the source Orthanc server as follows:
- Retrieve the labels matching this Orthanc instance (`Labels` section of the configuration
file) and whether the DICOM should be de-identified and sent to the destination, or skipped
(`ScopeToForward` section of the config file)
- If the Orthanc instance is not skipped:
- Download from Orthanc a transcoded version of the DICOM file to "Explicit VR Little
Endian" if pixel data must be edited to mask burned-in annotations
- If OCR must be used to detect burned-in annotations, retrieve the pixel coordinates of
boxes that need to be masked
- Apply the transformation rules (`Transformations` section of the config file) and
retrieve the de-identified DICOM file
- Transcode the DICOM file if the target transfer syntax is not "Explicit VR Little Endian"
- Return the de-identified DICOM file
Args:
instance_id (str): Orthanc instance ID
src_dicom (bytes): Content of the DICOM file downloaded from Orthanc
config (dict): Configuration file
logs (dict): Dict where logs should be added
"""
# Create a DicomDeidentifier object and load the DICOM file
try:
deidentifier = DicomDeidentifier(config, client.db, client.db_mapping)
except Exception as e:
raise Exception(f'Failed to initialize the DicomDeidentifier - {e}')
# The function `load_dicom` returns a list of matching labels for this DICOM file, and whether
# the DICOM file should be discarded based on these tags
try:
labels, skipped = deidentifier.load_dicom(src_dicom)
logs.update({
'OriginalDICOMFile': {
'SOPInstanceUID': rpacs_dicom_util.get_sop_instance_uid(deidentifier.dicom),
'TransferSyntaxUID': rpacs_dicom_util.get_transfer_syntax_uid(deidentifier.dicom)
},
'MatchingLabels': labels,
'Skipped': skipped
})
except Exception as e:
raise Exception(f'Failed to load the original DICOM file in the DicomDeidentifier - {e}')
if skipped is True:
return None
# Log transformations to apply
transformations = deidentifier.get_transformations_to_apply()
logs.update({'TransformationsToApply': transformations})
# If burned-in annotations must be removed from the pixel data, the DICOM file must be in
# uncompressed and in Little Endian format. If needed, we use Orthanc to download a transcoded
# version of the DICOM file
need_transcode, src_transfer_syntax = deidentifier.is_transcoding_needed()
if need_transcode is True:
try:
src_dicom = client.src_orthanc.download_instance_dicom(instance_id, transcode='1.2.840.10008.1.2.1')
deidentifier.load_dicom(src_dicom, src_transfer_syntax, initial_load=False)
except Exception as e:
raise Exception(f'Failed to transcode the original DICOM file to "Explicit VR Little Endian" with Orthanc in order to alter pixel data - {e}')
# If OCR must be used to detect burned-in annotations in pixel data
if deidentifier.is_ocr_needed():
# Find box coordinates with burned-in annotations and update the transformations rules
try:
dimensions = rpacs_dicom_util.get_dimensions(deidentifier.dicom)
boxes = get_box_coordinates(client.src_orthanc, instance_id, env.rekognition_region, dimensions)
deidentifier.add_box_coordinates(boxes)
except Exception as e:
raise Exception(f'Failed to find burned-in annotations in the original DICOM file with OCR - {e}')
# Apply the transformation rules
try:
dst_transfer_syntax = deidentifier.apply_transformations(logs)
except Exception as e:
raise Exception(f'Failed to apply the transformation rules to the original DICOM file - {e}')
# `apply_transformations` returns a transfer syntax if the de-identified DICOM file must be
# transcoded, or `None` if it should be sent as-is to the destination Orthanc server. If it must
# be transcoded, we upload the de-identified DICOM file to the source Orthanc server, and we
# download and return a transcoded version
dst_dicom = rpacs_dicom_util.export_dicom(deidentifier.dicom)
if dst_transfer_syntax != None:
try:
# Temporarily change the SOP Instance ID before uploading the de-identified DICOM file to
# Orthanc in order to prevent the original DICOM file
src_sop_instance_uid = rpacs_dicom_util.get_sop_instance_uid(deidentifier.dicom)
rpacs_dicom_util.set_sop_instance_uid(deidentifier.dicom)
dst_dicom = rpacs_dicom_util.export_dicom(deidentifier.dicom)
# Upload the de-identified DICOM file to Orthanc. This new DICOM file should be ignored by the
# de-identifier
tmp_instance_id = client.src_orthanc.upload_instance(dst_dicom)
client.db_msg.upsert(tmp_instance_id, {'Skip': True})
# Download a transcoded version of the de-identified DICOM file, and we restore the SOP
# Instance ID to its original value
dst_dicom = client.src_orthanc.download_instance_dicom(tmp_instance_id, transcode=dst_transfer_syntax)
deidentifier.load_dicom(dst_dicom, initial_load=False)
rpacs_dicom_util.set_sop_instance_uid(deidentifier.dicom, src_sop_instance_uid)
dst_dicom = rpacs_dicom_util.export_dicom(deidentifier.dicom)
except Exception as e:
raise Exception(f'Failed to transcode the de-identified DICOM file to "{dst_transfer_syntax}" - {e}')
return dst_dicom
| StarcoderdataPython |
1673180 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scanpy as sc
from scIB import utils
from scIB import metrics
def opt_louvain(adata, label_key, cluster_key, function=None, resolutions=None,
use_rep=None,
inplace=True, plot=False, force=True, verbose=True, **kwargs):
"""
params:
label_key: name of column in adata.obs containing biological labels to be
optimised against
cluster_key: name of column to be added to adata.obs during clustering.
Will be overwritten if exists and `force=True`
function: function that computes the cost to be optimised over. Must take as
arguments (adata, group1, group2, **kwargs) and returns a number for maximising
        resolutions: list of resolutions to be optimised over. If `resolutions=None`,
default resolutions of 20 values ranging between 0.1 and 2 will be used
use_rep: key of embedding to use only if adata.uns['neighbors'] is not defined,
otherwise will be ignored
returns:
res_max: resolution of maximum score
score_max: maximum score
score_all: `pd.DataFrame` containing all scores at resolutions. Can be used to plot the score profile.
clustering: only if `inplace=False`, return cluster assignment as `pd.Series`
plot: if `plot=True` plot the score profile over resolution
"""
if function is None:
function = metrics.nmi
if cluster_key in adata.obs.columns:
if force:
if verbose:
print(f"Warning: cluster key {cluster_key} already exists " +
"in adata.obs and will be overwritten")
else:
raise ValueError(f"cluster key {cluster_key} already exists in " +
"adata, please remove the key or choose a different name." +
"If you want to force overwriting the key, specify `force=True`")
if resolutions is None:
n = 20
resolutions = [2*x/n for x in range(1,n+1)]
score_max = 0
res_max = resolutions[0]
clustering = None
score_all = []
try:
adata.uns['neighbors']
except KeyError:
if verbose:
            print('computing neighbours for opt_louvain')
sc.pp.neighbors(adata, use_rep=use_rep)
for res in resolutions:
sc.tl.louvain(adata, resolution=res, key_added=cluster_key)
score = function(adata, label_key, cluster_key, **kwargs)
score_all.append(score)
if score_max < score:
score_max = score
res_max = res
clustering = adata.obs[cluster_key]
del adata.obs[cluster_key]
if verbose:
print(f'optimised clustering against {label_key}')
print(f'optimal cluster resolution: {res_max}')
print(f'optimal score: {score_max}')
score_all = pd.DataFrame(zip(resolutions, score_all), columns=('resolution', 'score'))
if plot:
# score vs. resolution profile
sns.lineplot(data= score_all, x='resolution', y='score').set_title('Optimal cluster resolution profile')
plt.show()
if inplace:
adata.obs[cluster_key] = clustering
return res_max, score_max, score_all
else:
return res_max, score_max, score_all, clustering
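if __name__ == "__main__":
    # Minimal usage sketch (assumes scanpy's bundled pbmc68k_reduced demo data
    # and the louvain package are installed; not part of the library API).
    adata = sc.datasets.pbmc68k_reduced()
    res_max, score_max, score_all = opt_louvain(
        adata, label_key='bulk_labels', cluster_key='opt_louvain',
        use_rep='X_pca', plot=False, verbose=True)
    print(score_all)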
| StarcoderdataPython |
3371117 | <reponame>nick41746/Techjam-Application<gh_stars>0
import operator
import re
from http import HTTPStatus
import os
from flask import Flask, jsonify, request
import math
robot_re = re.compile(r"^robot#([1-9][0-9]*)$")
robots = {}
app = Flask(__name__)
def distance(pos1, pos2, metric="euclidean"):
try:
if metric == "manhattan":
p = 1
# return abs(pos1["x"] - pos2["x"]) + abs(pos1["y"] - pos2["y"])
elif metric == "euclidean":
p = 2
else:
raise Exception("Error")
return math.pow(math.pow(abs(pos1["x"] - pos2["x"]), p) + math.pow(abs(pos1["y"] - pos2["y"]), p),(1/p))
except:
raise Exception("Error")
def _get_position(obj):
if 'x' in obj and 'y' in obj:
return obj
elif isinstance(obj, str):
if robot_re.match(obj):
_id = int(obj.split('#')[1])
if _id in robots:
return robots[_id]
else:
raise Exception("Error")
else:
raise Exception("Error")
else:
raise Exception("Error")
@app.route("/distance", methods=['POST'])
def calculate_distance():
body = request.get_json()
met = "euclidean"
if 'first_pos' not in body or 'second_pos' not in body:
return '', HTTPStatus.BAD_REQUEST
if 'metric' in body:
met = body['metric']
try:
pos1 = _get_position(body['first_pos'])
pos2 = _get_position(body['second_pos'])
except:
return '', HTTPStatus.BAD_REQUEST
try:
result = distance(pos1, pos2, met)
except:
return '', HTTPStatus.FAILED_DEPENDENCY
result = f"{result:.3f}"
return jsonify(distance=result), HTTPStatus.OK
@app.route("/robot/<robot_id>/position", methods=['PUT'])
def update_position(robot_id):
body = request.get_json()
try:
_id = int(robot_id)
except:
return '', HTTPStatus.BAD_REQUEST
if _id < 1 or _id > 999999:
return '', HTTPStatus.BAD_REQUEST
# if _id in robots:
# status = HTTPStatus.NO_CONTENT # 204
# else:
# status = HTTPStatus.CREATED # 201
robots[_id] = body['position']
return '', HTTPStatus.NO_CONTENT
@app.route("/robot/<robot_id>/position", methods=['GET'])
def get_position(robot_id):
try:
_id = int(robot_id)
except:
return '', HTTPStatus.BAD_REQUEST
if _id < 1 or _id > 999999:
return '', HTTPStatus.BAD_REQUEST
if _id not in robots:
return '', HTTPStatus.NOT_FOUND
return jsonify(position=robots[_id]), HTTPStatus.OK
@app.route("/nearest", methods=['POST'])
def get_nearest():
body = request.get_json()
if "ref_position" not in body:
return '', HTTPStatus.BAD_REQUEST
k = 1
if "k" in body:
try:
k = int(body["k"])
except:
return '', HTTPStatus.BAD_REQUEST
ref = body["ref_position"]
try:
        result = []
        d = [(distance(position, ref), robot) for robot, position in robots.items()]
        d.sort()
        result = [i[1] for i in d[:k]]
except:
return '', HTTPStatus.BAD_REQUEST
return jsonify(robot_ids=result), HTTPStatus.OK
@app.route("/closestpair", methods=['GET'])
def closest_pair():
n = len(robots)
if len(robots) < 2:
return '', HTTPStatus.FAILED_DEPENDENCY
f = open('webapp/in', "w")
f.write(str(n)+'\n')
for robot, position in robots.items():
f.write(str(position["x"]) + " " + str(position["y"])+"\n")
f.close()
output = float(os.popen('webapp/closestpair < webapp/in').read())
return jsonify(distance=f"{output:.3f}"), HTTPStatus.OK
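# Quick local smoke test using Flask's built-in test client. This block is an
# illustrative addition, not part of the original service; it only runs when
# the module is executed directly.
if __name__ == "__main__":
    client = app.test_client()
    client.put("/robot/1/position", json={"position": {"x": 0, "y": 0}})
    client.put("/robot/2/position", json={"position": {"x": 3, "y": 4}})
    print(client.post("/distance", json={"first_pos": "robot#1",
                                         "second_pos": "robot#2"}).get_json())
    print(client.post("/nearest", json={"ref_position": {"x": 1, "y": 1},
                                        "k": 2}).get_json())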
| StarcoderdataPython |
3236948 | <reponame>dewoolkaridhish4/C104<gh_stars>0
import csv
with open("height-weight.csv",newline="") as f:
reader=csv.reader(f)
filedata=list(reader)
filedata.pop(0)
newdata = []
for i in range(len(filedata)):
n_num=filedata[i][1]
newdata.append(float(n_num))
n=len(newdata)
newdata.sort()
if(n%2==0):
median1=float(newdata[n//2])
median2=float(newdata[n//2-1])
median=(median1+median2)/2
else:
median=newdata[n//2]
print("MEDIAN = " +str(median))
import csv
with open("Internet Users.csv",newline="") as f:
reader=csv.reader(f)
filedata=list(reader)
filedata.pop(0)
newdata = []
for i in range(len(filedata)):
n_num=filedata[i][1]
newdata.append(float(n_num))
n=len(newdata)
newdata.sort()
if(n%2==0):
median1=float(newdata[n//2])
median2=float(newdata[n//2-1])
median=(median1+median2)/2
else:
median=newdata[n//2]
print("MEDIAN = " +str(median)) | StarcoderdataPython |
3207510 | <gh_stars>0
"""
Resolve a Convex account name to an address.
"""
from .command_base import CommandBase
class AccountNameResolveCommand(CommandBase):
def __init__(self, sub_parser=None):
self._command_list = []
super().__init__('resolve', sub_parser)
def create_parser(self, sub_parser):
parser = sub_parser.add_parser(
self._name,
description='Get an address from an account name',
help='Get an address from an account name'
)
parser.add_argument(
'name',
help='account name to resolve'
)
return parser
def execute(self, args, output):
convex = self.load_convex(args.url)
address = convex.resolve_account_name(args.name)
if address:
output.add_line(f'address: {address}')
else:
output.add_line('not found')
output.set_value('address', address)
output.set_value('name', args.name)
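# Illustrative CLI flow (hedged): once registered via CommandBase, this
# subcommand would be invoked roughly as
#   <cli> resolve <account-name> [--url <convex-url>]
# and prints either "address: <n>" or "not found", while also setting the
# "address" and "name" values on the structured output. The executable name
# and the --url flag are assumptions based on `args.url`, not confirmed here.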
| StarcoderdataPython |
3268048 | from pytest import raises
def test_empty_stack_default(new_stack):
assert new_stack.head is None
def test_empty_stack_pop(new_stack):
with raises(IndexError):
new_stack.pop()
def test_empty_stack_peek(new_stack):
with raises(IndexError):
new_stack.peek()
def test_empty_stack_has_size(new_stack):
assert len(new_stack) == 0
def test_data_stack_pop_changes_size(ordered_stack):
assert len(ordered_stack) == 13
assert ordered_stack.pop() == 39
assert len(ordered_stack) == 12
def test_data_stack_peek_no_mutate(ordered_stack):
assert len(ordered_stack) == 13
assert ordered_stack.peek() == 39
assert len(ordered_stack) == 13
assert ordered_stack.peek() == 39
def test_data_stack_pop(ordered_stack):
assert ordered_stack.pop() == 39
assert ordered_stack.pop() == 36
def test_data_stack_pop_exhaust(ordered_stack):
while ordered_stack:
ordered_stack.pop()
assert len(ordered_stack) == 0
with raises(IndexError):
ordered_stack.pop()
def test_unordered_pop(unordered_stack):
assert unordered_stack.pop() == 6
assert unordered_stack.pop() == 1
assert unordered_stack.pop() == 3
assert unordered_stack.pop() == 5
def test_empty_stack_push(new_stack):
new_stack.push(0)
assert new_stack.head.value == 0
def test_empty_stack_push_multiple(new_stack):
for _ in range(30):
new_stack.push(0)
new_stack.push(1)
assert len(new_stack) == 31
assert new_stack.pop() == 1
def test_empty_stack_push_changes_size(new_stack):
assert len(new_stack) == 0
new_stack.push("test")
assert len(new_stack) == 1
| StarcoderdataPython |
1613608 | from django.urls import path
from rest_framework_simplejwt.views import (TokenObtainPairView,
TokenRefreshView)
from .views import signup, LessonView, ProfessorView
urlpatterns = [
path('v1/signup/', signup, name='signup'),
path('v1/token/',
TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('v1/token/refresh/',
TokenRefreshView.as_view(), name='token_refresh'),
path('v1/schedule/professor/', ProfessorView.as_view(), name='professor'),
path('v1/schedule/', LessonView.as_view(), name='schedule'),
]
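# Illustrative request flow for the JWT endpoints above (default simplejwt
# behaviour assumed; any project-level URL prefix is omitted):
#   POST .../v1/token/          {"username": ..., "password": ...} -> {"refresh": ..., "access": ...}
#   POST .../v1/token/refresh/  {"refresh": ...}                   -> {"access": ...}
# The schedule views would then expect an "Authorization: Bearer <access>" header.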
| StarcoderdataPython |
163231 | <reponame>dkotschessa/PyPCOM
from typing import List

# Page, AddCarForm, CarTable, and Car are defined elsewhere in this project.
class CarTablePage(Page):
add_car_form = AddCarForm()
car_table = CarTable()
def add_car(self, car: Car):
self.add_car_form.add_car(car)
def remove_car(self, car: Car):
self.car_table.remove_car(car)
@property
def cars(self) -> List[Car]:
return self.car_table.cars
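# Illustrative use in a test (a sketch only; the driver setup and the Car
# fields shown here are assumptions, not defined in this file):
#
#   driver = webdriver.Chrome()
#   page = CarTablePage(driver)
#   page.add_car(Car(make="Honda", model="Civic"))
#   assert any(c.make == "Honda" for c in page.cars)
#   page.remove_car(page.cars[0])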
| StarcoderdataPython |
1613033 | <reponame>hemantborole/aws-iot-twinmaker-samples
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import setuptools
setuptools.setup(
name='TwinMakerSiteWiseUtil',
version='0.1',
description='Iot Twin Maker Getting Started package',
url='#',
author='max',
install_requires=[],
author_email='',
packages=setuptools.find_packages(),
zip_safe=False
) | StarcoderdataPython |
73820 | <gh_stars>0
# -*- coding: UTF-8 -*-
'''
Created on 25 Feb. 2017
@author: HEAVYRAGE
'''
from abc import ABCMeta, abstractmethod
from flask_restful import fields
class Constants(object):
'''
A constants abstract class
'''
__metaclass__ = ABCMeta
file_fields = {
'isdir': fields.Boolean,
'path': fields.String,
'name': fields.String
}
class Storage(object):
__metaclass__ = ABCMeta
@abstractmethod
def authenticate(self):
'''
Authenticate client API
@return: Nothing
'''
@abstractmethod
def logout(self):
'''
Logout from client API
@return: Nothing
'''
@abstractmethod
def getfolder(self, folder_path):
'''
Return list of items within a folder
:param folder_path: The full path of the folder
'''
@abstractmethod
def upload(self, filename, data, rpath):
'''
Upload a file to a given folder path
:param filename: filename of the uploaded file
:param data: data stream for the file to upload
:param path: relative path of the folder
'''
@abstractmethod
def createfolder(self, foldername, parent):
'''
Create a folder at the given path
:param foldername: foldername
:param path: full path of the parent folder
'''
@abstractmethod
def getshares(self):
'''
List all shares
'''
@abstractmethod
def parseError(self, jresponse):
'''
Parse the error response from the storage system
:param jresponse: response in a JSON format
:return: a JSON response that describes the error
'''
def char_protect(self, strs):
chars = ['"', '/', '[', ']', '{', '}']
for char in chars:
strs = strs.replace(char, '%'+char.encode("hex").upper())
return strs
def char_unprotect(self, strs):
count = strs.count('%')
i = 0
while i < count:
            idx = strs.index('%')
            tmp = strs[idx + 1:idx + 3]  # the two hex digits that follow '%'
            strs = strs.replace('%' + tmp, tmp.decode("hex"))
count = strs.count('%')
i = i + 1
return strs | StarcoderdataPython |
122392 | """
unit tests for GridSearch class
"""
import tigerforecast
from tigerforecast.utils.autotuning import GridSearch
from tigerforecast.utils.optimizers import *
import jax.numpy as np
import matplotlib.pyplot as plt
import itertools
def test_grid_search(show=False):
test_grid_search_arma(show=show)
test_grid_search_lstm(show=show)
print("test_grid_search passed")
def test_grid_search_lstm(show=False):
problem_id = "SP500-v0"
method_id = "LSTM"
    problem_params = {}  # SP500 takes no parameters (an ARMA problem would take e.g. {'p':4, 'q':1})
method_params = {'n':1, 'm':1}
loss = lambda a, b: np.sum((a-b)**2)
    search_space = {'l': [3, 4, 5, 6], 'h': [2, 5, 8], 'optimizer':[]} # parameters for the LSTM method
opts = [Adam, Adagrad, ONS, OGD]
lr_start, lr_stop = -1, -3 # search learning rates from 10^start to 10^stop
learning_rates = np.logspace(lr_start, lr_stop, 1+2*np.abs(lr_start - lr_stop))
for opt, lr in itertools.product(opts, learning_rates):
search_space['optimizer'].append(opt(learning_rate=lr)) # create instance and append
trials, min_steps = 10, 100
hpo = GridSearch() # hyperparameter optimizer
optimal_params, optimal_loss = hpo.search(method_id, method_params, problem_id, problem_params, loss,
        search_space, trials=trials, smoothing=10, min_steps=min_steps, verbose=show) # run each model for at least min_steps steps
if show:
print("optimal params: ", optimal_params)
print("optimal loss: ", optimal_loss)
# test resulting method params
method = tigerforecast.method(method_id)
method.initialize(**optimal_params)
problem = tigerforecast.problem(problem_id)
x = problem.initialize(**problem_params)
loss = []
if show:
print("run final test with optimal parameters")
for t in range(5000):
y_pred = method.predict(x)
y_true = problem.step()
loss.append(mse(y_pred, y_true))
method.update(y_true)
x = y_true
if show:
print("plot results")
plt.plot(loss)
plt.show(block=False)
plt.pause(10)
plt.close()
def test_grid_search_arma(show=False):
problem_id = "ARMA-v0"
method_id = "AutoRegressor"
problem_params = {'p':3, 'q':2}
method_params = {}
loss = lambda a, b: np.sum((a-b)**2)
    search_space = {'p': [1,2,3,4,5], 'optimizer':[]} # parameters for the AutoRegressor method
opts = [Adam, Adagrad, ONS, OGD]
lr_start, lr_stop = 0, -4 # search learning rates from 10^start to 10^stop
learning_rates = np.logspace(lr_start, lr_stop, 1+2*np.abs(lr_start - lr_stop))
for opt, lr in itertools.product(opts, learning_rates):
search_space['optimizer'].append(opt(learning_rate=lr)) # create instance and append
trials, min_steps = 25, 250
hpo = GridSearch() # hyperparameter optimizer
optimal_params, optimal_loss = hpo.search(method_id, method_params, problem_id, problem_params, loss,
        search_space, trials=trials, smoothing=10, min_steps=min_steps, verbose=show) # run each model for at least min_steps steps
if show:
print("optimal params: ", optimal_params)
print("optimal loss: ", optimal_loss)
# test resulting method params
method = tigerforecast.method(method_id)
method.initialize(**optimal_params)
problem = tigerforecast.problem(problem_id)
x = problem.initialize(**problem_params)
loss = []
if show:
print("run final test with optimal parameters")
for t in range(5000):
y_pred = method.predict(x)
y_true = problem.step()
loss.append(mse(y_pred, y_true))
method.update(y_true)
x = y_true
if show:
print("plot results")
plt.plot(loss)
plt.show(block=False)
plt.pause(10)
plt.close()
if __name__ == "__main__":
test_grid_search(show=True)
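# Note on the search-space construction used in both tests above: every
# optimizer class is paired with every learning rate, e.g. (standalone,
# illustrative):
#
#   import itertools
#   opts = [Adam, Adagrad, ONS, OGD]
#   learning_rates = np.logspace(0, -4, 9)   # 1.0, ~0.316, 0.1, ..., 1e-4
#   optimizer_instances = [opt(learning_rate=lr)
#                          for opt, lr in itertools.product(opts, learning_rates)]
#   # 4 optimizers x 9 learning rates -> 36 candidate optimizer settings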
| StarcoderdataPython |