# -*- coding: utf-8 -*-
#######################################################################################
# #
# This file is part of the updater4pyi Project. #
# #
# Copyright (C) 2013, Philippe Faist #
# [email protected] #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, this #
# list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR #
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# #
#######################################################################################
import re
import sys
import os
import os.path
import datetime
from . import util
from .upd_defs import Updater4PyiError
from .upd_log import logger
class UpdateInterface(object):
def __init__(self, updater, progname=None, **kwargs):
self.updater = updater
self.progname = progname
super(UpdateInterface, self).__init__(**kwargs)
def start(self, **kwargs):
"""
        Start the interface's update-checking logic. It is up to the interface to decide
        when to check for updates, how often, etc. For example, a console interface would
        check right away, while a GUI might first load the application and set a timer to
        check later, so that startup is not slowed down by the update check.
"""
raise NotImplementedError
# -----------
class UpdateConsoleInterface(UpdateInterface):
"""
A very simple :py:class:`UpdateInterface` implementation that checks for updates each
time the program is run. This is mostly meant for debugging purposes.
"""
def __init__(self, updater, ask_before_checking=False, **kwargs):
super(UpdateConsoleInterface, self).__init__(updater=updater, **kwargs)
self.ask_before_checking = ask_before_checking
def start(self):
try:
            self._runupdatecheck()
except Updater4PyiError as e:
print "\n"
print "------------------------------------------------------------"
print "Error: %s" % (e)
print "Software update aborted."
print "------------------------------------------------------------"
print "\n"
return
# return to the main program.
return
def _runupdatecheck(self):
#
# See if we should ask before checking.
#
if (self.ask_before_checking):
if (not self._ynprompt("Do you wish to check for software updates? (y/n) ")):
return
#
# Check for updates.
#
upd_info = self.updater.check_for_updates()
if (upd_info is None):
# no updates.
print "No updates available."
return
#
# There's an update, prompt the user.
#
print ""
print "-----------------------------------------------------------"
print ""
print ("A new software update is available (%sversion %s)"
% (self.progname+" " if self.progname else "", upd_info.version))
print ""
if (self._ynprompt("Do you want to install it? (y/n) ")):
#
# yes, install update
#
self.updater.install_update(upd_info)
#
# update installed.
#
print ""
print "Update installed. Quitting. Please restart the program."
print ""
print "-----------------------------------------------------------"
print ""
sys.exit(0)
else:
print ""
print "Not installing update."
print ""
print "-----------------------------------------------------------"
print ""
def _ynprompt(self, msg):
yn = raw_input(msg)
return re.match(r'\s*y(es)?\s*', yn, re.IGNORECASE) is not None
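
# Illustrative usage of the console interface (a sketch -- how the `updater`
# object is constructed belongs to the host application; this module only
# needs it to expose the check_for_updates() and install_update() methods
# used above):
#
#     interface = UpdateConsoleInterface(updater=my_updater, progname="MyApp",
#                                        ask_before_checking=True)
#     interface.start()   # checks immediately and prompts on the console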
# ---------------------------------------------------------------------
# initial check by default 1 minute after app startup, so as not to slow down app startup.
DEFAULT_INIT_CHECK_DELAY = datetime.timedelta(days=0, seconds=60, microseconds=0)
# subsequent checks every week by default
DEFAULT_CHECK_INTERVAL = datetime.timedelta(days=7, seconds=0, microseconds=0)
_SETTINGS_ALL = ['check_for_updates_enabled', 'init_check_delay', 'check_interval',
'last_check']
class UpdateGenericGuiInterface(UpdateInterface):
def __init__(self, updater, ask_before_checking=True, **kwargs):
super(UpdateGenericGuiInterface, self).__init__(updater, **kwargs)
        self.ask_before_checking = ask_before_checking
self.update_installed = False
self.is_initial_delay = True
self.is_currently_checking = False
# load settings
d = self.load_settings(self._settings_all_keys())
self.init_check_delay = util.ensure_timedelta(d.get('init_check_delay', DEFAULT_INIT_CHECK_DELAY))
self.check_interval = util.ensure_timedelta(d.get('check_interval', DEFAULT_CHECK_INTERVAL))
try:
val = d.get('check_for_updates_enabled', True)
self.check_for_updates_enabled = util.getbool(val)
except ValueError:
logger.warning("Couldn't parse config value for `check_for_updates_enabled': %r", val)
self.check_for_updates_enabled = False
self.last_check = util.ensure_datetime(d.get('last_check', datetime.datetime(1970, 1, 1)))
if (self.ask_before_checking):
            self.asked_before_checking = d.get('asked_before_checking', False)
def start(self):
logger.debug("Starting interface (generic gui)")
self.schedule_next_update_check()
# properties
def initCheckDelay(self):
return self.init_check_delay
def setInitCheckDelay(self, init_check_delay, save=True):
self.init_check_delay = util.ensure_timedelta(init_check_delay)
if save:
self.save_settings({'init_check_delay': self.init_check_delay})
def checkInterval(self):
return self.check_interval
def setCheckInterval(self, check_interval, save=True):
self.check_interval = util.ensure_timedelta(check_interval)
if save:
self.save_settings({'check_interval': self.check_interval})
def checkForUpdatesEnabled(self):
return self.check_for_updates_enabled
def setCheckForUpdatesEnabled(self, enabled, save=True, schedule_check=True):
self.check_for_updates_enabled = util.getbool(enabled)
# save setting to settings file
if save:
self.save_settings({'check_for_updates_enabled': self.check_for_updates_enabled})
# also, schedule the next update check
if schedule_check:
self.schedule_next_update_check()
def lastCheck(self):
return self.last_check
def setLastCheck(self, last_check, save=True):
self.last_check = util.ensure_datetime(last_check)
if save:
self.save_settings({'last_check': self.last_check})
# ------------
def all_settings(self):
"""
Utility to get all settings. Useful for subclasses; this doesn't need to be reimplemented.
"""
return dict([(k,getattr(self,k)) for k in self._settings_all_keys()])
def _settings_all_keys(self):
"""
        Returns a list of relevant settings keys for this object. Includes `'asked_before_checking'` only
if the `ask_before_checking` argument given to the constructor was `True`.
"""
return _SETTINGS_ALL + (['asked_before_checking'] if self.ask_before_checking else [])
# ----------------------------------------------
def check_for_updates(self):
"""
Perform a possible update check. You don't have to reimplement this function, the default
implementation should be good enough and relies on your implementations of `ask_to_update()`
and `ask_to_restart()`.
If the update check isn't due yet, this function does not do the update check. If
you want to force an update check, call `do_check_for_updates()`.
"""
logger.debug("UpdateGenericGuiInterface: check_for_updates()")
if (self.update_installed):
logger.warning("We have already installed an update and pending restart.")
return
logger.debug("self.is_initial_delay=%r, self.timedelta_remaining_to_next_check()=%r",
self.is_initial_delay, self.timedelta_remaining_to_next_check())
# if we were called just after the initial delay, reset this flag.
if (self.is_initial_delay):
self.is_initial_delay = False
        # now, really check that we are due for a software update check.
        # * we might be past the initial delay while the app was just started; updates may be
        #   checked e.g. only once a month, so a check might not be due yet even though this
        #   function was called by the timeout.
        # * even if we are not at the initial delay, the user may have disabled update checks
        #   in the settings between the scheduling of the update check and now.
if (not self.is_check_now_due()):
# software update check is not yet due (not even in the next 10 seconds). Just
# schedule next check.
logger.debug("Update check is not yet due. Postpone.")
self.schedule_next_update_check()
return
try:
self.do_check_for_updates()
finally:
self.schedule_next_update_check()
def do_check_for_updates(self):
"""
        Actually perform the update check. Call this function if you want to force an
        update check even though one is not yet due. If you only want to check when a
        check is actually due, call `check_for_updates()` instead.
Returns:
          - `None` if a check is already in progress, or if we asked the user for the
            first time whether they want to check regularly for updates and they refused;
          - `False` if no new update is available
- a tuple if a new update is available:
- `(True, rel_info)` if the user installed the update but did not
restart the app;
- `(False, rel_info)` if the user declined to install the update now
- the tuple `(False, None, error_str)` if an error occurred while checking
for updates.
"""
if self.is_currently_checking:
return None
try:
self.is_currently_checking = True
# check for updates
if (self.ask_before_checking and not self.asked_before_checking):
# ask before we check.
logger.debug("UpdateGenericGuiInteface: this is the first time. Let's ask the user "
"if (s)he's cool with us auto-updating..")
answer = self.ask_first_time()
                self.setCheckForUpdatesEnabled(answer, save=False)
self.asked_before_checking = True
self.save_settings({
'asked_before_checking': True,
'check_for_updates_enabled': answer,
})
                if not answer:
                    logger.debug("UpdateGenericGuiInterface: we were told not to check for updates.")
return None
rel_info = self.updater.check_for_updates()
if (rel_info is None):
# no updates.
logger.debug("UpdateGenericGuiInterface: No updates available.")
return False
logger.debug("Update (version %s) is available.", rel_info.get_version())
#
# There's an update, prompt the user.
#
if self.ask_to_update(rel_info):
#
# yes, install update
#
# make sure we save our settings now in case we restart later
#
self.save_settings()
#
# And actually install the update.
#
self.updater.install_update(rel_info)
self.update_installed = True
#
# update installed.
#
if self.ask_to_restart():
self.updater.restart_app()
return (True, rel_info) # whatever, our app will have exited anyway
# return to the main program.
return (True, rel_info)
logger.debug("UpdateGenericGuiInterface: Not installing update.")
# return to the main program.
return (False, rel_info)
except Updater4PyiError as e:
logger.warning("Error while checking for updates: %s", e)
return (False, None, unicode(e))
finally:
self.last_check = datetime.datetime.now()
self.save_settings({'last_check': self.last_check})
self.is_currently_checking = False
def is_check_now_due(self, tolerance=datetime.timedelta(days=0, seconds=10)):
return (self.check_for_updates_enabled and
self.timedelta_remaining_to_next_check() <= tolerance)
def schedule_next_update_check(self):
if not self.check_for_updates_enabled:
logger.debug("UpdateGenericGuiInterface: Not scheduling update check because we were "
"asked not to check for updates.")
return
        if self.is_currently_checking:
            logger.debug("UpdateGenericGuiInterface: Not scheduling update check because we're "
                         "currently checking for updates!")
            return
if (self.is_initial_delay):
self.set_timeout_check(self.init_check_delay)
logger.debug("UpdateGenericGuiInterface: requested initial single-shot timer for %r seconds"
%(self.init_check_delay))
else:
timedelta_remaining = self.timedelta_remaining_to_next_check()
if (timedelta_remaining <= datetime.timedelta(0)):
logger.debug("UpdateGenericGuiInterface: software update check due now already, checking")
self.check_for_updates()
return
self.set_timeout_check(timedelta_remaining)
logger.debug("UpdateGenericGuiInterface: requested single-shot timer for %r",
timedelta_remaining)
def timedelta_remaining_to_next_check(self):
return ((self.last_check + self.check_interval) - datetime.datetime.now())
# ------------------------------------------------------------------------------
# the following methods need to be reimplemented, using the gui toolkit at hand.
# ------------------------------------------------------------------------------
def ask_first_time(self):
"""
Subclasses should prompt the user whether they want to regularly look for updates.
This is prompted to the user only if the main program set `ask_before_checking` to `True` in the
constructor of this object.
Return TRUE if the program should regularly check for updates, or FALSE if not.
"""
raise NotImplementedError
def ask_to_update(self, rel_info):
"""
Subclasses should prompt the user whether they want to install the update `rel_info` or not.
Note: Interfaces may also present additional buttons such as "Never check for updates", or
"Skip this update", and set properties and/or settings accordingly with e.g.
`setCheckForUpdatesEnabled()`.
        Return TRUE if the update should be installed, or FALSE if not.
"""
raise NotImplementedError
def ask_to_restart(self):
"""
Subclasses should prompt the user to restart the program after a successful update.
Return TRUE if the program should be restarted, or FALSE if not.
"""
raise NotImplementedError
def set_timeout_check(self, interval_timedelta):
"""
Subclasses should reimplement this function to call the function `check_for_updates()` after
`interval_timedelta`. `interval_timedelta` is a `datetime.timedelta` object.
"""
raise NotImplementedError
def load_settings(self, keylist):
"""
        Subclasses may reimplement this function to customize where and how the settings are stored,
        usually using a toolkit-specific utility, such as QSettings in PyQt4.
"""
raise NotImplementedError
def save_settings(self, d=None):
"""
        Save the settings given in the dictionary `d` to some local settings storage. If `d` is
        None, then all settings should be saved, effectively taking `d` to be the dictionary
        returned by `all_settings()`.
"""
raise NotImplementedError
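

# ---------------------------------------------------------------------
# Illustrative sketch (not part of updater4pyi): a minimal subclass of
# UpdateGenericGuiInterface backed by threading.Timer and an in-memory settings
# dict. It only shows how the abstract hooks above fit together; a real
# interface would use the GUI toolkit's timers, dialogs and settings storage.
import threading

class ExampleTimerUpdateInterface(UpdateGenericGuiInterface):
    def __init__(self, updater, **kwargs):
        # the settings store must exist before the base constructor calls load_settings()
        self._stored_settings = {}
        super(ExampleTimerUpdateInterface, self).__init__(updater, **kwargs)

    # -- settings persistence: keep everything in a plain dict --------------
    def load_settings(self, keylist):
        return dict((k, self._stored_settings[k]) for k in keylist
                    if k in self._stored_settings)

    def save_settings(self, d=None):
        if d is None:
            d = self.all_settings()
        self._stored_settings.update(d)

    # -- timer: call check_for_updates() after the requested delay ----------
    def set_timeout_check(self, interval_timedelta):
        timer = threading.Timer(interval_timedelta.total_seconds(),
                                self.check_for_updates)
        timer.daemon = True
        timer.start()

    # -- dialogs: a real implementation would show toolkit message boxes ----
    def ask_first_time(self):
        return True    # pretend the user agreed to regular update checks

    def ask_to_update(self, rel_info):
        return False   # pretend the user declined this particular update

    def ask_to_restart(self):
        return False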
"""
DNF plugin for getting the Python 3 porting status.
Put this in your DNF plugin directory, then run:
$ dnf --enablerepo=rawhide --enablerepo=rawhide-source py3query -o fedora.json
This will give you a file "fedora.json" with information for portingdb.
"""
from __future__ import print_function
import sys
import json
import collections
import time
import hawkey
import dnf
import dnf.cli
import dnf.subject
from dnfpluginscore import _
import bugzilla # python-bugzilla
import yaml
BUGZILLA_URL = 'bugzilla.redhat.com'
# Tracker bugs which are used to find all relevant package bugs
TRACKER_BUGS = {
1698500: "F31_PY2REMOVAL",
1700324: "F31FailsToInstall",
1708725: "PYTHON2_EOL",
1803205: "BRPY27",
1927309: "F35FTBFS",
1927313: "F35FailsToInstall",
}
# Bugzilla statuses that indicate the bug was filed in error
NOTABUG_STATUSES = {'CLOSED NOTABUG', 'CLOSED WONTFIX', 'CLOSED CANTFIX'}
# Template URL to which you can add the bug ID and get a working URL
BUGZILLA_BUG_URL = "https://bugzilla.redhat.com/show_bug.cgi?id={}"
SEED_PACKAGES = {
2: [
'python2-devel', 'python2', 'python2-libs', 'python2-tkinter',
'python(abi) = 2.7', '/usr/bin/python2', 'python27', 'python2.7',
'/usr/bin/python2.7', 'libpython2.7.so.1.0', 'libpython2.7.so.1.0()(64bit)',
'pygtk2', 'pygobject2',
],
3: [
'python3-devel', 'python3', 'python3-libs', 'python3-tkinter',
'python-devel', 'python', 'python-libs',
'/usr/bin/python', '/usr/bin/python3',
'python(abi) = 3.4', '/usr/bin/python3.4', 'libpython3.4m.so.1.0',
'libpython3.so', 'python3-cairo',
'python(abi) = 3.5', '/usr/bin/python3.5', 'libpython3.5m.so.1.0',
'libpython3.5m.so.1.0()(64bit)',
'python(abi) = 3.6', '/usr/bin/python3.6', 'libpython3.6m.so.1.0',
'libpython3.6m.so.1.0()(64bit)',
'python(abi) = 3.7', '/usr/bin/python3.7', 'libpython3.7m.so.1.0',
'libpython3.7m.so.1.0()(64bit)',
'python(abi) = 3.8', '/usr/bin/python3.8', 'libpython3.8.so.1.0',
'libpython3.8.so.1.0()(64bit)',
'python(abi) = 3.9', '/usr/bin/python3.9', 'libpython3.9.so.1.0',
'libpython3.9.so.1.0()(64bit)',
]
}
PROVIDES_BLACKLIST = (
'postscriptdriver(', 'pkgconfig(', 'perl(', 'mvn(', 'mimehandler(',
'config(', 'bundled(', 'application(', 'appdata(',
)
BUGZILLA_PAGE_SIZE = 1000
class Py3Query(dnf.Plugin):
name = 'py3query'
def __init__(self, base, cli):
super(Py3Query, self).__init__(base, cli)
self.base = base
self.cli = cli
if self.cli is not None:
self.cli.register_command(Py3QueryCommand)
def progressbar(seq, text, namegetter=str):
total = len(seq)
prev_len = 0
def printer(i, name):
pad_len = prev_len - len(name)
total_len = 20
if total:
progress = ('=' * (total_len * i // total)).ljust(total_len)
else:
progress = '=' * total_len
if i == 0:
r = ''
else:
r = '\r'
line = '{}[{}] {}/{} {}: {}{} '.format(
r, progress, i, total, text, name, ' ' * pad_len)
print(line, end='', file=sys.stderr)
sys.stderr.flush()
return len(name)
try:
for i, item in enumerate(seq):
prev_len = printer(i, str(namegetter(item)))
yield item
except GeneratorExit:
pass
except:
printer(i, 'Error!')
print(file=sys.stderr)
raise
printer(total, 'Done!')
print(file=sys.stderr)
def have_binaries(packages):
"""Check if there are any binaries (executables) in the packages.
Return: (bool) True if packages have any binaries, False otherwise
"""
for pkg in packages:
for filepath in pkg.files:
if filepath.startswith(('/usr/bin', '/usr/sbin')):
return True
return False
def set_status(result, pkgs, python_versions):
# Look at the Python dependencies of given packages, based on the
# name only (this means different arches are grouped into one)
name_by_version = collections.defaultdict(set)
pkg_by_version = collections.defaultdict(set)
name_by_arch = collections.defaultdict(set)
for p in pkgs:
name_by_arch[p.arch].add(f'{p.name}.{p.arch}')
for v in python_versions[p]:
name_by_version[v].add(f'{p.name}.{p.arch}')
pkg_by_version[v].add(p)
if (name_by_version[2] & name_by_version[3]) - name_by_arch['src']:
# If a package depends on *both* py2 and py3, it's not ported
result['status'] = 'mispackaged'
result['note'] = (
'A single package depends on both Python 2 and '
'Python 3.\n'
'It should be split into a python2 and python3 subpackages '
'to prevent it from always dragging the py2 dependency in.')
elif not name_by_version[2]:
# Hooray!
result['status'] = 'py3-only'
else:
# Otherwise, a srpm isn't ported if it has more packages that need py2
# than those that need py3
if len(name_by_version[3]) >= len(name_by_version[2]):
if have_binaries(pkg_by_version[2]) and not have_binaries(pkg_by_version[3]):
# Identify packages with py2 only binaries.
result['status'] = 'mispackaged'
result['nonblocking'] = True
result['note'] = (
'The Python 3 package is missing binaries available '
'in a Python 2 package.\n')
elif all(
result['rpms'][format_rpm_name(pkg)]['almost_leaf']
or result['rpms'][format_rpm_name(pkg)]['arch'] == 'src'
for pkg in pkg_by_version[2]
) and any(
result['rpms'][format_rpm_name(pkg)]['arch'] != 'src'
for pkg in pkg_by_version[2]
):
# Packages with py2 subpackages not required by anything.
# (source packages don't count)
result['status'] = 'legacy-leaf'
else:
result['status'] = 'released'
else:
result['status'] = 'idle'
def format_rpm_name(pkg):
if pkg.epoch:
epoch = '{}:'.format(pkg.epoch)
else:
epoch = ''
return '{pkg.name}-{epoch}{pkg.version}-{pkg.release}.{pkg.arch}'.format(
pkg=pkg, epoch=epoch)
def get_srpm_name(pkg):
return hawkey.split_nevra(pkg.sourcerpm).name if pkg.sourcerpm else pkg.name
def get_srpm_names(pkgs):
return {get_srpm_name(pkg) for pkg in pkgs}
def chunks(sequence, size=1000):
"""Yield chunks of given size (1000) from a sequence until exhausted.
The last chunk might be smaller."""
for start in range(0, len(sequence), size):
yield sequence[start:start + size]
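# Illustrative example of chunks() (for clarity, not part of the plugin logic):
#   list(chunks([1, 2, 3, 4, 5], size=2)) == [[1, 2], [3, 4], [5]]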
class Py3QueryCommand(dnf.cli.Command):
"""The util command there is extending the dnf command line."""
aliases = ('py3query',)
summary = _('query the python3 porting status')
usage = _('[OPTIONS] [KEYWORDS]')
def configure(self):
"""Setup the demands."""
demands = self.cli.demands
demands.sack_activation = True
demands.available_repos = True
@staticmethod
def set_argparser(parser):
"""Parse command line arguments."""
parser.add_argument('--output', '-o', metavar='FILE', action='store',
help=_('write output to the given file'))
parser.add_argument('--no-bz', dest='fetch_bugzilla', action='store_false',
default=True, help=_("Don't get Bugzilla links"))
parser.add_argument('--misnamed', dest='py3query_misnamed', action='store',
help=_("YAML file with old misnamed packages"))
parser.add_argument('--repo-groups', dest='repo_groups_file',
default=None, metavar='FILE', action='store',
help=_("Optional filename of a 'groups.json' file "
"that will record which package comes from "
"which repositories"))
def run(self):
self.base_query = self.base.sack.query()
self.pkg_query = self.base_query.filter(arch__neq=['src'])
self.src_query = self.base_query.filter(arch=['src'])
# python_versions: {package: set of Python versions}
python_versions = collections.defaultdict(set)
# rpm_pydeps: {package: set of dep names}
rpm_pydeps = collections.defaultdict(set)
# dep_versions: {dep name: Python version}
dep_versions = collections.defaultdict(set)
for n, seeds in SEED_PACKAGES.items():
provides = sorted(self.all_provides(seeds), key=str)
# This effectively includes packages that still need
# Python 3.4 while Rawhide only provides Python 3.5
provides += sorted(seeds)
for dep in progressbar(provides, 'Getting py{} requires'.format(n)):
dep_versions[str(dep)] = n
for pkg in self.whatrequires(dep, self.base_query):
python_versions[pkg].add(n)
rpm_pydeps[pkg].add(str(dep))
# Add packages with 'python?' as a component of their name, if they
# haven't been added as dependencies
for name, version in {
'python': 0,
'python2': 2,
'python3': 3,
}.items():
for pattern in '{}-*', '*-{}', '*-{}-*':
name_glob = pattern.format(name)
query = self.base_query.filter(name__glob=name_glob)
message = 'Getting {} packages'.format(name_glob)
for pkg in progressbar(query, message):
if pkg.sourcerpm and pkg.sourcerpm.startswith('mingw-'):
# Ignore mingw packages
continue
if pkg not in python_versions:
python_versions[pkg].add(version)
        # add the python27 / python2.7 packages manually: they don't require
        # Python 2, but they are Python 2
for py2name in 'python27', 'python2.7':
query = self.pkg_query.filter(name=py2name)
for pkg in query:
python_versions[pkg].add(2)
# srpm_names: {package: srpm name}
# by_srpm_name: {srpm name: set of packages}
srpm_names = {}
by_srpm_name = collections.defaultdict(set)
# repo_srpms: {repo name: set of srpm names}
repo_srpms = {}
for pkg in progressbar(python_versions.keys(), 'Getting SRPMs'):
srpm_name = get_srpm_name(pkg)
srpm_names[pkg] = srpm_name
by_srpm_name[srpm_name].add(pkg)
repo_srpms.setdefault(pkg.reponame, set()).add(srpm_name)
old_misnamed = {}
old_misnamed_flat = {}
if self.opts.py3query_misnamed:
with open(self.opts.py3query_misnamed) as f:
old_misnamed = yaml.safe_load(f)
old_misnamed_flat = {pk: pr for pkg in old_misnamed
for pr, pk in old_misnamed[pkg].items()}
# deps_of_pkg: {package: set of packages}
deps_of_pkg = collections.defaultdict(set)
# build_deps_of_srpm: {srpm: set of packages}
build_deps_of_srpm = collections.defaultdict(set)
# requirers_of_pkg: {package: set of srpm}
requirers_of_pkg = collections.defaultdict(set)
# build_requirers_of_pkg: {pkg: set of srpm}
build_requirers_of_pkg = collections.defaultdict(set)
# all_provides: {provide_name: package}
all_provides = {str(r).split()[0]: p for p in python_versions for r in p.provides
if not str(r).startswith(PROVIDES_BLACKLIST)}
for pkg in progressbar(sorted(python_versions.keys()), 'Getting requirements'):
if python_versions[pkg] == {3}:
continue
if pkg.arch == 'src':
continue
reqs = set()
build_reqs = set()
provides = set(pkg.provides)
for provide in pkg.provides:
str_provide = str(provide).split(' ')[0]
if str_provide in old_misnamed_flat:
provides.add(old_misnamed_flat[str_provide])
for provide in provides:
reqs.update(self.whatrequires(provide, self.pkg_query))
build_reqs.update(self.whatrequires(provide, self.src_query))
for req in reqs:
if req in python_versions.keys():
deps_of_pkg[req].add(pkg)
# Both Python and non-Python packages here.
requirers_of_pkg[pkg].add(req)
for req in build_reqs:
if req.name in by_srpm_name.keys():
build_deps_of_srpm[req.name].add(pkg)
# Both Python and non-Python packages here.
build_requirers_of_pkg[pkg].add(req)
# unversioned_requirers: {srpm_name: set of srpm_names}
unversioned_requirers = collections.defaultdict(set)
for pkg in progressbar(set.union(*requirers_of_pkg.values(), *build_requirers_of_pkg.values()),
'Processing packages with ambiguous dependencies'):
# Ignore packages that are:
if (python_versions.get(pkg) == {3} or # Python 3 only
pkg.name.endswith('-doc')): # Documentation
continue
for require in (pkg.requires + pkg.requires_pre + pkg.recommends +
pkg.suggests + pkg.supplements + pkg.enhances):
require = str(require).split()[0]
real_require = require
try:
require = old_misnamed[pkg.name][real_require]
except KeyError:
pass
requirement = all_provides.get(require)
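                # NOTE: illustrative completion (an assumption). This excerpt never
                # populates `unversioned_requirers`, so the per-SRPM report built
                # below would always be empty; a plausible step, matching the
                # "{srpm_name: set of srpm_names}" description above, is to record
                # the requiring SRPM whenever the ambiguous provide resolves to a
                # known Python package:
                if requirement is not None:
                    unversioned_requirers[get_srpm_name(requirement)].add(
                        get_srpm_name(pkg))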
# json_output: {srpm name: info}
json_output = dict()
for name in progressbar(by_srpm_name, 'Generating output'):
pkgs = sorted(by_srpm_name[name])
r = json_output[name] = {}
r['rpms'] = {
format_rpm_name(p): {
'py_deps': {str(d): dep_versions[d] for d in rpm_pydeps[p]},
'non_python_requirers': {
'build_time': sorted(get_srpm_names(build_requirers_of_pkg[p]) - by_srpm_name.keys()),
'run_time': sorted(get_srpm_names(requirers_of_pkg[p]) - by_srpm_name.keys()),
},
'almost_leaf': (
# not SRPM and is Python 2 and is not required by anything EXCEPT
# sibling subpackages
p.arch != 'src' and
2 in python_versions[p] and
not get_srpm_names(build_requirers_of_pkg[p] | requirers_of_pkg[p]) - {name}
),
'legacy_leaf': (
# not SRPM and is Python 2 and is not required by anything
p.arch != 'src' and
2 in python_versions[p] and
not get_srpm_names(build_requirers_of_pkg[p] | requirers_of_pkg[p])
),
'arch': p.arch,
} for p in pkgs}
set_status(r, pkgs, python_versions)
r['deps'] = sorted(set(srpm_names[d]
for p in pkgs
for d in deps_of_pkg.get(p, '')
if srpm_names[d] != name))
r['build_deps'] = sorted(set(srpm_names[d]
for d in build_deps_of_srpm.get(name, '')
if srpm_names[d] != name))
if unversioned_requirers.get(name):
r['unversioned_requirers'] = sorted(unversioned_requirers[name])
# add Bugzilla links
if self.opts.fetch_bugzilla:
bar = iter(progressbar(['connecting', 'tracker'],
'Getting bug list'))
next(bar)
bz = bugzilla.RHBugzilla(BUGZILLA_URL)
next(bar)
include_fields = ['id', 'depends_on', 'blocks', 'component',
'status', 'resolution', 'last_change_time',
'short_desc']
if len(TRACKER_BUGS) >= 1000:
raise NotImplementedError('Too many trackers')
trackers = bz.getbugs(TRACKER_BUGS,
include_fields=include_fields)
all_ids = set(b for t in trackers for b in t.depends_on)
bar.close()
bugs = []
for chunk in progressbar(
list(chunks(sorted(all_ids), BUGZILLA_PAGE_SIZE)),
'Getting bugs',
namegetter=lambda ids:f'{min(ids)}-{max(ids)}',
):
bugs += bz.getbugs(chunk, include_fields=include_fields)
def bug_namegetter(bug):
return '{bug.id} {bug.status} {bug.component}'.format(bug=bug)
rank = ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_QA', 'VERIFIED',
'RELEASE_PENDING', 'CLOSED']
def key(bug):
return rank.index(bug.status), bug.last_change_time
bugs = sorted(bugs, key=key)
for bug in progressbar(bugs, 'Merging bugs',
namegetter=bug_namegetter):
r = json_output.get(bug.component, {})
                pkg_bugs = r.setdefault('bugs', {})
                entry = pkg_bugs.get(bug.id)
if not entry:
entry = {
'url': bug.weburl,
'short_desc': bug.short_desc,
'status': bug.status,
'resolution': bug.resolution,
'last_change': time.strftime(
'%Y-%m-%d %H:%M:%S',
bug.last_change_time.timetuple()),
'trackers': [],
}
for tb in bug.blocks:
alias = TRACKER_BUGS.get(tb)
if alias:
entry['trackers'].append(alias)
                pkg_bugs[bug.id] = entry
# Print out output
if self.opts.output:
with open(self.opts.output, 'w') as f:
json.dump(json_output, f, indent=2, sort_keys=True)
else:
json.dump(json_output, sys.stdout, indent=2, sort_keys=True)
sys.stdout.flush()
# Write out a groups.json
if self.opts.repo_groups_file:
output = {repo_name: {'name': repo_name,
'packages': sorted(srpm_names)}
for repo_name, srpm_names in repo_srpms.items()}
with open(self.opts.repo_groups_file, 'w') as f:
json.dump(output, f, indent=2, sort_keys=True)
def all_provides(self, seeds):
pkgs = set()
for seed in seeds:
query = dnf.subject.Subject(seed, ignore_case=True).get_best_query(
self.base.sack, with_provides=False)
pkgs.update(query.run())
provides = set()
for pkg in sorted(pkgs):
provides.update(pkg.provides)
return provides
def whatrequires(self, dep, query):
query = query.filter(requires=dep)
return set(query)
import os
import shutil
import re
from fontInfoData import getAttrWithFallback, intListToNum, normalizeStringForPostscript
from outlineOTF import OutlineOTFCompiler
from featureTableWriter import FeatureTableWriter, winStr, macStr
from kernFeatureWriter import KernFeatureWriter
try:
sorted
except NameError:
def sorted(l):
l = list(l)
l.sort()
return l
class MakeOTFPartsCompiler(object):
"""
This object will create the "parts" needed by the FDK.
The only external method is :meth:`ufo2fdk.tools.makeotfParts.compile`.
    There is one attribute, :attr:`ufo2fdk.tools.makeotfParts.paths`
that may be referenced externally. That is a dictionary of
paths to the various parts.
When creating this object, you must provide a *font*
object and a *path* indicating where the parts should
be saved. Optionally, you can provide a *glyphOrder*
list of glyph names indicating the order of the glyphs
in the font. You may also provide an *outlineCompilerClass*
argument that will serve as the outline source compiler.
The class passed for this argument must be a subclass of
:class:`ufo2fdk.tools.outlineOTF.OutlineOTFCompiler`.
"""
def __init__(self, font, path, features=None, glyphOrder=None, outlineCompilerClass=OutlineOTFCompiler):
self.font = font
self.path = path
self.log = []
self.outlineCompilerClass = outlineCompilerClass
# store the path to an eventual custom feature file
self.features = features
# store the glyph order
if glyphOrder is None:
glyphOrder = sorted(font.keys())
self.glyphOrder = glyphOrder
# make the paths for all files
self.paths = dict(
outlineSource=os.path.join(path, "font.otf"),
menuName=os.path.join(path, "menuname"),
glyphOrder=os.path.join(path, "glyphOrder"),
fontInfo=os.path.join(path, "fontinfo"),
features=os.path.join(path, "features")
)
def compile(self):
"""
Compile the parts.
"""
# set up the parts directory removing
# an existing directory if necessary.
if os.path.exists(self.path):
shutil.rmtree(self.path)
os.mkdir(self.path)
# build the parts
self.setupFile_outlineSource(self.paths["outlineSource"])
self.setupFile_menuName(self.paths["menuName"])
self.setupFile_glyphOrder(self.paths["glyphOrder"])
self.setupFile_fontInfo(self.paths["fontInfo"])
self.setupFile_features(self.paths["features"])
def setupFile_outlineSource(self, path):
"""
Make the outline source file.
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
c = self.outlineCompilerClass(self.font, path, self.glyphOrder)
c.compile()
self.log += c.log
def setupFile_menuName(self, path):
"""
Make the menu name source file. This gets the values for
the file using the fallback system as described below:
==== ===
[PS] postscriptFontName
f= openTypeNamePreferredFamilyName
s= openTypeNamePreferredSubfamilyName
l= styleMapFamilyName
m=1, openTypeNameCompatibleFullName
==== ===
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
psName = getAttrWithFallback(self.font.info,"postscriptFontName")
lines = [
"[%s]" % psName
]
# family name
familyName = getAttrWithFallback(self.font.info,"openTypeNamePreferredFamilyName")
encodedFamilyName = winStr(familyName)
lines.append("f=%s" % encodedFamilyName)
if encodedFamilyName != familyName:
lines.append("f=1,%s" % macStr(familyName))
# style name
styleName = getAttrWithFallback(self.font.info,"openTypeNamePreferredSubfamilyName")
encodedStyleName = winStr(styleName)
lines.append("s=%s" % encodedStyleName)
if encodedStyleName != styleName:
lines.append("s=1,%s" % macStr(styleName))
# compatible name
winCompatible = getAttrWithFallback(self.font.info,"styleMapFamilyName")
## the second qualification here is in place for Mac Office <= 2004.
## in that app the menu name is pulled from name ID 18. the font
## may have standard naming data that combines to a length longer
## than the app can handle (see Adobe Tech Note #5088). the designer
## may have created a specific openTypeNameCompatibleFullName to
## get around this problem. sigh, old app bugs live long lives.
if winCompatible != familyName or self.font.info.openTypeNameCompatibleFullName is not None:
# windows
l = "l=%s" % normalizeStringForPostscript(winCompatible)
lines.append(l)
# mac
macCompatible = getAttrWithFallback(self.font.info,"openTypeNameCompatibleFullName")
l = "m=1,%s" % macStr(macCompatible)
lines.append(l)
text = "\n".join(lines) + "\n"
f = open(path, "wb")
f.write(text)
f.close()
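    # Illustrative contents of the generated "menuname" file for a hypothetical
    # font with postscriptFontName "ExampleSans-Bold", preferred family
    # "Example Sans" and preferred subfamily "Bold" (assuming the Windows-encoded
    # names match the originals and the compatible name matches the family, so no
    # "f=1,", "s=1,", "l=" or "m=1," lines are added):
    #
    #   [ExampleSans-Bold]
    #   f=Example Sans
    #   s=Bold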
def setupFile_glyphOrder(self, path):
"""
Make the glyph order source file.
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
lines = []
for glyphName in self.glyphOrder:
if glyphName in self.font and self.font[glyphName].unicode is not None:
code = self.font[glyphName].unicode
code = "%04X" % code
if len(code) <= 4:
code = "uni%s" % code
else:
code = "u%s" % code
line = "%s %s %s" % (glyphName, glyphName, code)
else:
line = "%s %s" % (glyphName, glyphName)
lines.append(line)
text = "\n".join(lines) + "\n"
f = open(path, "wb")
f.write(text)
f.close()
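    # Illustrative lines from the generated glyphOrder file: an encoded glyph
    # "A" (U+0041) and an unencoded glyph ".notdef" (working and final glyph
    # names are written identically by this compiler):
    #
    #   A A uni0041
    #   .notdef .notdef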
def setupFile_fontInfo(self, path):
"""
Make the font info source file. This gets the values for
the file using the fallback system as described below:
========================== ===
IsItalicStyle styleMapStyleName
IsBoldStyle styleMapStyleName
PreferOS/2TypoMetrics openTypeOS2Selection
IsOS/2WidthWeigthSlopeOnly openTypeOS2Selection
IsOS/2OBLIQUE openTypeOS2Selection
========================== ===
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
lines = []
# style mapping
styleMapStyleName = getAttrWithFallback(self.font.info,"styleMapStyleName")
if styleMapStyleName in ("italic", "bold italic"):
lines.append("IsItalicStyle true")
else:
lines.append("IsItalicStyle false")
if styleMapStyleName in ("bold", "bold italic"):
lines.append("IsBoldStyle true")
else:
lines.append("IsBoldStyle false")
# fsSelection bits
selection = getAttrWithFallback(self.font.info,"openTypeOS2Selection")
if 7 in selection:
lines.append("PreferOS/2TypoMetrics true")
else:
lines.append("PreferOS/2TypoMetrics false")
if 8 in selection:
lines.append("IsOS/2WidthWeigthSlopeOnly true")
else:
lines.append("IsOS/2WidthWeigthSlopeOnly false")
if 9 in selection:
lines.append("IsOS/2OBLIQUE true")
else:
lines.append("IsOS/2OBLIQUE false")
# write the file
if lines:
f = open(path, "wb")
f.write("\n".join(lines))
f.close()
def setupFile_features(self, path):
"""
Make the features source file. If any tables
or the kern feature are defined in the font's
features, they will not be overwritten.
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
# force absolute includes into the features
if self.font.path is None:
existingFeaturePath = None
existing = self.font.features.text
if existing is None:
existing = ""
elif self.features is not None:
existingFeaturePath = os.path.normpath(os.path.join(self.font.path, self.features))
with open(existingFeaturePath, "r") as fea:
text = fea.read()
existing = forceAbsoluteIncludesInFeatures(text, os.path.dirname(existingFeaturePath))
else:
existingFeaturePath = os.path.join(self.font.path, "features.fea")
existing = forceAbsoluteIncludesInFeatures(self.font.features.text, os.path.dirname(self.font.path))
# break the features into parts
features, tables = extractFeaturesAndTables(existing, scannedFiles=[existingFeaturePath])
# build tables that are not in the existing features
autoTables = {}
if "head" not in tables:
autoTables["head"] = self.writeFeatures_head()
if "hhea" not in tables:
autoTables["hhea"] = self.writeFeatures_hhea()
if "OS/2" not in tables:
autoTables["OS/2"] = self.writeFeatures_OS2()
if "name" not in tables:
autoTables["name"] = self.writeFeatures_name()
# build the kern feature if necessary
autoFeatures = {}
if "kern" not in features and len(self.font.kerning):
autoFeatures["kern"] = self.writeFeatures_kern()
# write the features
features = [existing]
for name, text in sorted(autoFeatures.items()):
features.append(text)
for name, text in sorted(autoTables.items()):
features.append(text)
features = "\n\n".join(features)
# write the result
f = open(path, "wb")
f.write(features)
f.close()
def writeFeatures_kern(self):
"""
Write the kern feature to a string and return it.
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
writer = KernFeatureWriter(self.font)
return writer.write()
def writeFeatures_head(self):
"""
Write the head to a string and return it.
This gets the values for the file using the fallback
system as described below:
===== ===
X.XXX versionMajor.versionMinor
===== ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
versionMajor = getAttrWithFallback(self.font.info, "versionMajor")
versionMinor = getAttrWithFallback(self.font.info, "versionMinor")
value = "%d.%s" % (versionMajor, str(versionMinor).zfill(3))
writer = FeatureTableWriter("head")
writer.addLineWithKeyValue("FontRevision", value)
return writer.write()
def writeFeatures_hhea(self):
"""
Write the hhea to a string and return it.
This gets the values for the file using the fallback
system as described below:
=========== ===
Ascender openTypeHheaAscender
Descender openTypeHheaDescender
LineGap openTypeHheaLineGap
CaretOffset openTypeHheaCaretOffset
=========== ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
ascender = getAttrWithFallback(self.font.info, "openTypeHheaAscender")
descender = getAttrWithFallback(self.font.info, "openTypeHheaDescender")
lineGap = getAttrWithFallback(self.font.info, "openTypeHheaLineGap")
caret = getAttrWithFallback(self.font.info, "openTypeHheaCaretOffset")
writer = FeatureTableWriter("hhea")
writer.addLineWithKeyValue("Ascender", _roundInt(ascender))
writer.addLineWithKeyValue("Descender", _roundInt(descender))
writer.addLineWithKeyValue("LineGap", _roundInt(lineGap))
writer.addLineWithKeyValue("CaretOffset", _roundInt(caret))
return writer.write()
def writeFeatures_name(self):
"""
Write the name to a string and return it.
This gets the values for the file using the fallback
system as described below:
========= ===
nameid 0 copyright
nameid 7 trademark
nameid 8 openTypeNameManufacturer
nameid 9 openTypeNameDesigner
nameid 10 openTypeNameDescription
nameid 11 openTypeNameManufacturerURL
nameid 12 openTypeNameDesignerURL
nameid 13 openTypeNameLicense
nameid 14 openTypeNameLicenseURL
nameid 19 openTypeNameSampleText
========= ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
idToAttr = [
(0 , "copyright"),
(7 , "trademark"),
(8 , "openTypeNameManufacturer"),
(9 , "openTypeNameDesigner"),
(10 , "openTypeNameDescription"),
(11 , "openTypeNameManufacturerURL"),
(12 , "openTypeNameDesignerURL"),
(13 , "openTypeNameLicense"),
(14 , "openTypeNameLicenseURL"),
(19 , "openTypeNameSampleText")
]
multilineNameTableEntries = {}
lines = []
for id, attr in idToAttr:
value = getAttrWithFallback(self.font.info, attr)
if value is None:
continue
s = 'nameid %d "%s";' % (id, winStr(value))
lines.append(s)
s = 'nameid %d 1 "%s";' % (id, macStr(value))
lines.append(s)
if not lines:
return ""
writer = FeatureTableWriter("name")
for line in lines:
writer.addLine(line)
return writer.write()
def writeFeatures_OS2(self):
"""
Write the OS/2 to a string and return it.
This gets the values for the file using the fallback
system as described below:
============= ===
FSType openTypeOS2Type
Panose openTypeOS2Panose
UnicodeRange openTypeOS2UnicodeRanges
CodePageRange openTypeOS2CodePageRanges
TypoAscender openTypeOS2TypoAscender
TypoDescender openTypeOS2TypoDescender
TypoLineGap openTypeOS2TypoLineGap
winAscent openTypeOS2WinAscent
winDescent openTypeOS2WinDescent
XHeight xHeight
CapHeight capHeight
WeightClass openTypeOS2WeightClass
WidthClass openTypeOS2WidthClass
Vendor openTypeOS2VendorID
============= ===
**This should not be called externally.** Subclasses
may override this method to handle the string creation
in a different way if desired.
"""
codePageBitTranslation = {
0 : "1252",
1 : "1250",
2 : "1251",
3 : "1253",
4 : "1254",
5 : "1255",
6 : "1256",
7 : "1257",
8 : "1258",
16 : "874",
17 : "932",
18 : "936",
19 : "949",
20 : "950",
21 : "1361",
48 : "869",
49 : "866",
50 : "865",
51 : "864",
52 : "863",
53 : "862",
54 : "861",
55 : "860",
56 : "857",
57 : "855",
58 : "852",
59 : "775",
60 : "737",
61 : "708",
62 : "850",
63 : "437"
}
# writer
writer = FeatureTableWriter("OS/2")
# type
writer.addLineWithKeyValue("FSType", intListToNum(getAttrWithFallback(self.font.info, "openTypeOS2Type"), 0, 16))
# panose
panose = [str(i) for i in getAttrWithFallback(self.font.info, "openTypeOS2Panose")]
writer.addLineWithKeyValue("Panose", " ".join(panose))
# unicode ranges
unicodeRange = [str(i) for i in getAttrWithFallback(self.font.info, "openTypeOS2UnicodeRanges")]
if unicodeRange:
writer.addLineWithKeyValue("UnicodeRange", " ".join(unicodeRange))
# code page ranges
codePageRange = [codePageBitTranslation[i] for i in getAttrWithFallback(self.font.info, "openTypeOS2CodePageRanges") if i in codePageBitTranslation]
if codePageRange:
writer.addLineWithKeyValue("CodePageRange", " ".join(codePageRange))
# vertical metrics
writer.addLineWithKeyValue("TypoAscender", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2TypoAscender")))
writer.addLineWithKeyValue("TypoDescender", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2TypoDescender")))
writer.addLineWithKeyValue("TypoLineGap", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2TypoLineGap")))
writer.addLineWithKeyValue("winAscent", _roundInt(getAttrWithFallback(self.font.info, "openTypeOS2WinAscent")))
writer.addLineWithKeyValue("winDescent", abs(_roundInt(getAttrWithFallback(self.font.info, "openTypeOS2WinDescent"))))
writer.addLineWithKeyValue("XHeight", _roundInt(getAttrWithFallback(self.font.info, "xHeight")))
writer.addLineWithKeyValue("CapHeight", _roundInt(getAttrWithFallback(self.font.info, "capHeight")))
writer.addLineWithKeyValue("WeightClass", getAttrWithFallback(self.font.info, "openTypeOS2WeightClass"))
writer.addLineWithKeyValue("WidthClass", getAttrWithFallback(self.font.info, "openTypeOS2WidthClass"))
writer.addLineWithKeyValue("Vendor", '"%s"' % getAttrWithFallback(self.font.info, "openTypeOS2VendorID"))
return writer.write()
includeRE = re.compile(
"(include\s*\(\s*)"
"([^\)]+)"
"(\s*\))" # this won't actually capture a trailing space.
)
forceAbsoluteIncludesInFeaturesTestText = """
# absolute path
include(/Users/bob/foo1/bar1/default.fea);
# relative path
include(foo2/bar2/default.fea);
# . syntax
include(./foo3/bar3/default.fea);
# .. syntax
include(../foo4/bar4/default.fea);
# spaces around path
include( foo5/bar5/default.fea );
"""
forceAbsoluteIncludesInFeaturesTestResult = """
# absolute path
include(/Users/bob/foo1/bar1/default.fea);
# relative path
include(/test1/test2/foo2/bar2/default.fea);
# . syntax
include(/test1/test2/foo3/bar3/default.fea);
# .. syntax
include(/test1/foo4/bar4/default.fea);
# spaces around path
include( /test1/test2/foo5/bar5/default.fea );
"""
def forceAbsoluteIncludesInFeatures(text, directory):
"""
Convert relative includes in the *text*
to absolute includes.
>>> result = forceAbsoluteIncludesInFeatures(forceAbsoluteIncludesInFeaturesTestText, "/test1/test2")
>>> result == forceAbsoluteIncludesInFeaturesTestResult
True
"""
for match in reversed(list(includeRE.finditer(text))):
start, includePath, close = match.groups()
# absolute path
if os.path.isabs(includePath):
continue
# relative path
currentDirectory = directory
parts = includePath.split(os.sep)
for index, part in enumerate(parts):
part = part.strip()
if not part:
continue
# .. = up one level
if part == "..":
currentDirectory = os.path.dirname(currentDirectory)
# . = current level
elif part == ".":
continue
else:
break
subPath = os.sep.join(parts[index:])
srcPath = os.path.join(currentDirectory, subPath)
includeText = start + srcPath + close
text = text[:match.start()] + includeText + text[match.end():]
return text
def _roundInt(value):
return int(round(value))
# ----------------------
# Basic Feature Splitter
# ----------------------
stringRE = re.compile(
"(\"[^$\"]*\")"
)
featureTableStartRE = re.compile(
"("
"feature"
"\s+"
"\S{4}"
"\s*"
"\{"
"|"
"table"
"\s+"
"\S{4}"
"\s*"
"\{"
")",
re.MULTILINE
)
featureNameRE = re.compile(
"feature"
"\s+"
"(\S{4})"
"\s*"
"\{"
)
tableNameRE = re.compile(
"table"
"\s+"
"(\S{4})"
"\s*"
"\{"
)
def extractFeaturesAndTables(text, scannedFiles=None):
    # use None as the default to avoid sharing one mutable list between calls
    if scannedFiles is None:
        scannedFiles = []
# strip all comments
decommentedLines = [line.split("#")[0] for line in text.splitlines()]
text = "\n".join(decommentedLines)
# replace all strings with temporary placeholders.
destringedLines = []
stringReplacements = {}
for line in text.splitlines():
if "\"" in line:
line = line.replace("\\\"", "__ufo2fdk_temp_escaped_quote__")
for found in stringRE.findall(line):
temp = "__ufo2fdk_temp_string_%d__" % len(stringReplacements)
line = line.replace(found, temp, 1)
stringReplacements[temp] = found.replace("__ufo2fdk_temp_escaped_quote__", "\\\"")
line = line.replace("__ufo2fdk_temp_escaped_quote__", "\\\"")
destringedLines.append(line)
text = "\n".join(destringedLines)
# extract all includes
includes = []
for match in includeRE.finditer(text):
start, includePath, close = match.groups()
includes.append(includePath)
# slice off the text that comes before
# the first feature/table definition
precedingText = ""
startMatch = featureTableStartRE.search(text)
if startMatch is not None:
start, end = startMatch.span()
precedingText = text[:start].strip()
text = text[start:]
else:
precedingText = text
text = ""
# break the features
broken = _textBreakRecurse(text)
# organize into tables and features
features = {}
tables = {}
for text in broken:
text = text.strip()
if not text:
continue
# replace the strings
finalText = text
        # iterate over a copy, since entries are deleted while looping
        for temp, original in list(stringReplacements.items()):
if temp in finalText:
del stringReplacements[temp]
finalText = finalText.replace(temp, original, 1)
finalText = finalText.strip()
# grab feature or table names and store
featureMatch = featureNameRE.search(text)
if featureMatch is not None:
features[featureMatch.group(1)] = finalText
else:
tableMatch = tableNameRE.search(text)
tables[tableMatch.group(1)] = finalText
# scan all includes
for path in includes:
if path in scannedFiles:
continue
scannedFiles.append(path)
if os.path.exists(path):
f = open(path, "r")
text = f.read()
f.close()
f, t = extractFeaturesAndTables(text, scannedFiles)
features.update(f)
tables.update(t)
return features, tables
def _textBreakRecurse(text):
matched = []
match = featureTableStartRE.search(text)
if match is None:
matched.append(text)
else:
start, end = match.span()
# add any preceding text to the previous item
if start != 0:
precedingText = matched.pop(0)
precedingText += text[:start]
matched.insert(0, precedingText)
# look ahead to see if there is another feature
next = text[end:]
nextMatch = featureTableStartRE.search(next)
if nextMatch is None:
# if nothing has been found, add
# the remaining text to the feature
matchedText = text[start:]
matched.append(matchedText)
else:
# if one has been found, grab all text
# from before the feature start and add
# it to the current feature.
nextStart, nextEnd = nextMatch.span()
matchedText = text[:end + nextStart]
matched.append(matchedText)
# recurse through the remaining text
matched += _textBreakRecurse(next[nextStart:])
return matched
extractFeaturesAndTablesTestText = """
@foo = [bar];
# test commented item
#feature fts1 {
# sub foo by bar;
#} fts1;
feature fts2 {
sub foo by bar;
} fts2;
table tts1 {
nameid 1 "feature this { is not really a \\\"feature that { other thing is";
} tts1;feature fts3 { sub a by b;} fts3;
"""
extractFeaturesAndTablesTestResult = (
{
'fts2': 'feature fts2 {\n sub foo by bar;\n} fts2;',
'fts3': 'feature fts3 { sub a by b;} fts3;'
},
{
'tts1': 'table tts1 {\n nameid 1 "feature this { is not really a \\"feature that { other thing is";\n} tts1;'
}
)
def testBreakFeaturesAndTables():
"""
>>> r = extractFeaturesAndTables(extractFeaturesAndTablesTestText)
>>> r == extractFeaturesAndTablesTestResult
True
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import re
import logging
from pegasus.models import (
TargetPlatform, Architecture, ProductType, Language, Executor,
LibraryInfo
)
from pegasus.util import (
push_working_directory,
pop_working_directory,
find_path_in_list
)
class Linux(TargetPlatform):
# tuple: (prefix, extension)
product_type_data = {
ProductType.Invalid: ("", ""),
ProductType.Application: ("", ""),
ProductType.Commandline: ("", ""),
ProductType.DynamicLibrary: ("lib", ".so"),
ProductType.StaticLibrary: ("lib", ".a"),
}
def get_include_paths(self):
return [
"/usr/include",
"/usr/include/%s" % self.machine_triplet,
"/usr/local/include",
"/usr/lib/%s/%s/%s/include" % (
self.compiler.get_command(),
self.machine_triplet,
self.compiler.get_version()
),
"/usr/lib/%s/%s/%s/include-fixed" % (
self.compiler.get_command(),
self.machine_triplet,
self.compiler.get_version()
),
"/opt/vc/include"
]
def get_library_paths(self):
return [
"/usr/lib",
"/usr/lib/%s" % self.machine_triplet,
"/usr/local/lib",
"/opt/vc/lib/"
]
def default_compiler(self):
return "gcc"
def default_driver(self):
return "gnumake"
# determine how many threads can be used for make -j
# sysctl -n hw.ncpu
# or just use python.
# import multiprocessing; multiprocessing.cpu_count()
def default_architectures(self):
# <gnu/stubs-32.h> will be missing on a 64-bit machine, if it's not setup to build for 32-bit.
return [
Architecture.x86_64,
Architecture.x86
]
def prevalidate_driver_data(self, data):
pass
def post_load_driver_schema(self, driver_schema):
commandline = "%s -dumpmachine" % (self.compiler.get_command())
executor = Executor()
result, output = executor.execute(commandline=commandline, log_output=False, log_errors=False, fail_on_error=False)
if result and output:
self.machine_triplet = output.strip()
def supports_fat_binaries(self):
return False
def process_params_for_driver(self, layoutparams, driver, driver_name):
links = []
for link in layoutparams.links:
links.append("-l%s" % link)
layoutparams.links = links
return layoutparams
def link_product_dependency(self, toplevel, dependent):
full_path = os.path.normpath(os.path.join(
toplevel.context.project_to_base_relative,
dependent.context.parent_to_root_relative,
dependent.context.product_root,
self.get_full_product_path(dependent.context.product)
))
if dependent.context.product.name not in toplevel.links:
toplevel.links.append(dependent.context.product.name)
product_dirname = os.path.dirname(full_path)
if product_dirname not in toplevel.libdirs:
toplevel.libdirs.append(product_dirname)
def get_full_product_name(self, product):
prefix, extension = self.__class__.product_type_data[product.output]
return "%s%s%s" % (prefix, product.name, extension)
def get_full_product_path(self, product):
return self.get_full_product_name(product)
def get_full_symbols_path(self, product):
logging.info("TODO: implement get_full_symbols_path")
return None
def product_supported(self, product, driver_instance, driver_name):
return product.output in self.__class__.product_type_data.keys()
def check_source_compiles(self, source, **kwargs):
return self.compiler.check_source_compiles(source, **kwargs)
def check_source_runs(self, source, **kwargs):
return self.compiler.check_source_runs(source, **kwargs)
def find_include_path(self, path, **kwargs):
return find_path_in_list(path, self.get_include_paths())
def find_library_path(self, path, **kwargs):
return find_path_in_list(path, self.get_library_paths())
def find_package(self, name, **kwargs):
# TODO: check different package managers
# assume debian for now (testing)
debian = "dpkg -s %(package_name)s"
package_managers = {
"debian" : debian
}
package_manager = "debian"
data = {
"package_name": name
}
command = package_managers[package_manager] % data
executor = Executor()
result, output = executor.execute(commandline=command, fail_on_error=False, log_output=False, log_errors=False)
return result
def find_library(self, name, **kwargs):
paths = self.get_library_paths()
so_pattern = re.compile(
"lib%s\."
"(?P<extension>so)"
"(?P<major>\.\d)"
"(?P<minor>\.\d)"
"(?P<patch>\.\d)" % name
)
static_pattern = re.compile(
"lib%s\.(?P<extension>a)" % name
)
best_match = None
# search through stock paths
for path in paths:
# skip paths that don't exist
if not os.path.isdir(path):
continue
for item in os.listdir(path):
# for every file in that path, test against the patterns
so_match = so_pattern.search(item)
static_match = static_pattern.search(item)
if so_match:
# we found the pattern, set default value to ""
# and retrieve vars
vars = so_match.groupdict("")
major = vars["major"].lstrip(".")
minor = vars["minor"].lstrip(".")
patch = vars["patch"].lstrip(".")
#logging.info(
# "-> %s.%s [%s, %s, %s]"\
# % (name, vars["extension"], major, minor, patch)
#)
soname = "lib%s.so.%s.%s.%s" % (name, major, minor, patch)
li = LibraryInfo(
name=name, path=path, extension=vars["extension"],
major_version=major, minor_version=minor,
patch_version=patch, soname=soname, lastindex=so_match.lastindex
)
if not best_match or (li.lastindex > best_match.lastindex):
best_match = li
elif static_match:
vars = static_match.groupdict("")
li = LibraryInfo(
name=name, path=path, extension=vars["extension"],
major_version="", minor_version="",
patch_version="", soname="", lastindex=static_match.lastindex
)
if not best_match or (li.lastindex > best_match.lastindex):
best_match = li
return best_match
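
# Illustrative usage (a sketch under assumptions -- how the Linux platform object
# is constructed is driver-specific and not shown in this module):
#
#   platform = Linux(...)                  # built by the pegasus driver machinery
#   zlib = platform.find_library("z")      # -> LibraryInfo or None
#   if zlib is not None:
#       print(zlib.soname)                 # e.g. "libz.so.1.2.8", whatever is installed
#   platform.find_package("zlib1g-dev")    # truthy if dpkg reports it installed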
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import json
import os
import re
import sys
import urllib
import urllib2
# Where all the data lives.
ROOT_URL = "http://build.chromium.org/p/chromium.memory.full/builders"
# TODO(groby) - support multi-line search from the command line. Useful when
# scanning for classes of failures, see below.
SEARCH_STRING = """<p class=\"failure result\">
Failed memory test: content
</p>"""
# Location of the log cache.
CACHE_DIR = "buildlogs.tmp"
# If we don't find anything after searching |CUTOFF| logs, we're probably done.
CUTOFF = 200
def EnsurePath(path):
"""Makes sure |path| does exist, tries to create it if it doesn't."""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class Cache(object):
def __init__(self, root_dir):
self._root_dir = os.path.abspath(root_dir)
def _LocalName(self, name):
"""If name is a relative path, treat it as relative to cache root.
If it is absolute and under cache root, pass it through.
Otherwise, raise error.
"""
if os.path.isabs(name):
assert os.path.commonprefix([name, self._root_dir]) == self._root_dir
else:
name = os.path.join(self._root_dir, name)
return name
def _FetchLocal(self, local_name):
local_name = self._LocalName(local_name)
EnsurePath(os.path.dirname(local_name))
if os.path.exists(local_name):
f = open(local_name, 'r')
lines = f.readlines()
f.close()
return lines
return None
def _FetchRemote(self, remote_name):
try:
response = urllib2.urlopen(remote_name)
except:
print "Could not fetch", remote_name
raise
return response.read()
def Update(self, local_name, remote_name):
local_name = self._LocalName(local_name)
EnsurePath(os.path.dirname(local_name))
blob = self._FetchRemote(remote_name)
f = open(local_name, "w")
f.write(blob)
return blob.splitlines()
def FetchData(self, local_name, remote_name):
result = self._FetchLocal(local_name)
if result:
return result
# If we get here, the local cache does not exist yet. Fetch, and store.
return self.Update(local_name, remote_name)
class Builder(object):
def __init__(self, waterfall, name):
self._name = name
self._waterfall = waterfall
def Name(self):
return self._name
def LatestBuild(self):
return self._waterfall.GetLatestBuild(self._name)
def GetBuildPath(self, build_num):
return "%s/%s/builds/%d" % (
self._waterfall._root_url, urllib.quote(self._name), build_num)
def _FetchBuildLog(self, build_num):
local_build_path = "builds/%s" % self._name
local_build_file = os.path.join(local_build_path, "%d.log" % build_num)
return self._waterfall._cache.FetchData(local_build_file,
self.GetBuildPath(build_num))
def _CheckLog(self, build_num, tester):
log_lines = self._FetchBuildLog(build_num)
return any(tester(line) for line in log_lines)
def ScanLogs(self, tester):
occurrences = []
build = self.LatestBuild()
no_results = 0
while build != 0 and no_results < CUTOFF:
if self._CheckLog(build, tester):
occurrences.append(build)
else:
no_results = no_results + 1
build = build - 1
return occurrences
class Waterfall(object):
def __init__(self, root_url, cache_dir):
self._root_url = root_url
self._builders = {}
self._top_revision = {}
self._cache = Cache(cache_dir)
def Builders(self):
return self._builders.values()
def Update(self):
self._cache.Update("builders", self._root_url)
self.FetchInfo()
def FetchInfo(self):
if self._top_revision:
return
html = self._cache.FetchData("builders", self._root_url)
""" Search for both builders and latest build number in HTML
<td class="box"><a href="builders/<builder-name>"> identifies a builder
<a href="builders/<builder-name>/builds/<build-num>"> is the latest build.
"""
box_matcher = re.compile(r'.*a href[^>]*>([^<]*)<')
build_matcher = re.compile(r'.*a href="builders/(.*)/builds/([0-9]+)".*')
last_builder = ""
for line in html:
if 'a href="builders/' in line:
if 'td class="box"' in line:
last_builder = box_matcher.match(line).group(1)
self._builders[last_builder] = Builder(self, last_builder)
else:
result = build_matcher.match(line)
builder = result.group(1)
assert builder == urllib.quote(last_builder)
self._top_revision[last_builder] = int(result.group(2))
def GetLatestBuild(self, name):
self.FetchInfo()
assert self._top_revision
return self._top_revision[name]
class MultiLineChange(object):
def __init__(self, lines):
self._tracked_lines = lines
self._current = 0
def __call__(self, line):
""" Test a single line against multi-line change.
If it matches the currently active line, advance one line.
If the current line is the last line, report a match.
"""
if self._tracked_lines[self._current] in line:
self._current = self._current + 1
if self._current == len(self._tracked_lines):
self._current = 0
return True
else:
self._current = 0
return False
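# Illustrative usage (mirrors main() and Builder._CheckLog):
#   tester = MultiLineChange(SEARCH_STRING.splitlines())
#   hit = any(tester(line) for line in log_lines)
# i.e. the tester reports True only after all tracked lines have appeared on
# consecutive input lines, in order; any non-matching line resets the search.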
def main(argv):
# Create argument parser.
parser = argparse.ArgumentParser()
commands = parser.add_mutually_exclusive_group(required=True)
commands.add_argument("--update", action='store_true')
commands.add_argument("--find", metavar='search term')
parser.add_argument("--json", action='store_true',
help="Output in JSON format")
args = parser.parse_args()
path = os.path.abspath(os.path.dirname(argv[0]))
cache_path = os.path.join(path, CACHE_DIR)
full = Waterfall(ROOT_URL, cache_path)
if args.update:
full.Update()
for builder in full.Builders():
print "Updating", builder.Name()
builder.ScanLogs(lambda x:False)
if args.find:
result = []
tester = MultiLineChange(args.find.splitlines())
full.FetchInfo()
if not args.json:
print "SCANNING FOR ", args.find
for builder in full.Builders():
if not args.json:
print "Scanning", builder.Name()
occurrences = builder.ScanLogs(tester)
if occurrences:
min_build = min(occurrences)
path = builder.GetBuildPath(min_build)
if args.json:
data = {}
data['builder'] = builder.Name()
data['first_affected'] = min_build
data['last_affected'] = max(occurrences)
data['last_build'] = builder.LatestBuild()
data['frequency'] = ((int(builder.LatestBuild()) - int(min_build)) /
len(occurrences))
data['total'] = len(occurrences)
data['first_url'] = path
result.append(data)
else:
print "Earliest occurrence in build %d" % min_build
print "Latest occurrence in build %d" % max(occurrences)
print "Latest build: %d" % builder.LatestBuild()
print path
print "%d total" % len(occurrences)
if args.json:
json.dump(result, sys.stdout, indent=2, sort_keys=True)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
#!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
import numpy as np
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# Magnetic field inside copper coil with hollow copper cylinder #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# -------------------------------------------------------- #
# Default precision is insufficient, therefore we increase #
# precision. One can increase the number of decimal #
# places or bits, where the number of bits is ~3.33 #
# times the number of decimal places. #
# -------------------------------------------------------- #
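# (For reference: mp.prec = 80 bits below corresponds to roughly 80 / 3.33 ≈ 24 decimal places.)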
#mp.dps=25 # decimal places
mp.prec=80 # precision in bits
# ---------------------------------------------------------#
# Define Variables and Constants #
# ---------------------------------------------------------#
mu0 = 4*pi*1e-7 # vacuum permeability
rho_kuchling = 0.0172e-6 # resistivity Kuchling 17th edition, p.649, tab. 45
sigma_kuchling = 1/rho_kuchling
sigma_abs = 53e6 # de.wikipedia.org/wiki/Kupfer: 58.1e6
sigma_arg = 52e6 # de.wikipedia.org/wiki/Kupfer: 58.1e6
r1 = 30e-3 # inner radius of copper cylinder
r2 = 35e-3 # outer radius of copper cylinder
B0 = 6.9e-2 # adjust this as needed for scaling
npts = 1e3
fmin = 1
fmax = 2500
# -----------------------------------------------------#
# NOTE: According to formula 26 on p.14, the B-Field #
# inside the cylinder (r<r1) is equal to the B-Field #
# at the inner boundary of the copper cylinder #
# (B(r1)), therefore we set r to r1 for further #
# calculations. #
# -----------------------------------------------------#
r = r1
# -----------------------------------------------------#
# Create a list for convenient printing of vars to #
# file, add LaTeX where necessary. #
# -----------------------------------------------------#
params = [
' ' + r'\textcolor{red}{$\sigma_{Fit,|\hat{B}|}' + r'$} & \textcolor{red}{$' + r'\SI{' + str(sigma_abs) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
' ' + r'\textcolor{red}{$\sigma_{Fit,\angle\hat{B}}' + r'$} & \textcolor{red}{$' + r'\SI{' + str(sigma_arg) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
' ' + r'\textcolor{red}{$\sigma_{Kuch}' + r'$} & \textcolor{red}{$' + r'\SI{' + str(sigma_kuchling) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
' ' + r'$\mu_0' + '$ & $' + r'\SI{' + str(mu0) + r'}{\newton\per\ampere\squared}' + r'$\\' + "\n",
' ' + '$r' + '$ & $' + r'\SI{' + str(r) + r'}{\meter}' + r'$\\' + "\n",
' ' + '$r_1' + '$ & $' + r'\SI{' + str(r1) + r'}{\meter}' + r'$\\' + "\n",
' ' + '$r_2' + '$ & $' + r'\SI{' + str(r2) + r'}{\meter}' + r'$\\' + "\n",
' ' + '$B_0' + '$ & $' + r'\SI{' + str(B0) + r'}{\tesla}' + r'$\\' + "\n",
' ' + '$NPTS' + '$ & $' + r'\num{' + str(npts) + '}' + r'$\\' + "\n",
' ' + '$f_{min}' + '$ & $' + r'\SI{' + str(fmin) + r'}{\hertz}' + r'$\\' + "\n",
' ' + '$f_{max}' + '$ & $' + r'\SI{' + str(fmax) + r'}{\hertz}' + r'$\\' + "\n",
]
font = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 9,
}
titlefont = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 10,
}
plot_legend_fontsize = 9
plot_color_fit = 'blue'
plot_color_measurements = 'black'
plot_label_measurements = 'Messwerte'
plot_size_measurements = 16
plot_scale_x = 'log'
plot_label_fit = 'Fit-Funktion'
plot_label_x = 'Frequenz (Hz)'
plot_1_label_y = 'gemessene Spannung (mV)'
plot_2_label_y = 'Phase (Grad)'
plot_1_title = r"Exakte L\"osung: Betrag Magnetfeld, Spule mit Kupferrohr"
plot_2_title = r"Exakte L\"osung: Phase Magnetfeld, Spule mit Kupferrohr"
# ---------------------------------------------------------#
# Functions #
# #
# See formula 26 on p.14 of script for experiment. #
# #
# NOTE: We use frequency f instead of angular frequency #
# omega since that is what we actually set on the function #
# generator. #
# NOTE: We evaluate B_abs and B_arg based on two different #
# values for sigma, which allows each of the curves to be #
# fitted more accurately. #
# ---------------------------------------------------------#
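# Sketch of the expression implemented by the lambdas below (as read from the code):
#   k = sqrt(2*pi*f * mu0 * sigma / 2) * (1 - 1j)
#   B(r)/B0 = [J0(k*r)*Y2(k*r1) - J2(k*r1)*Y0(k*r)] / [J0(k*r2)*Y2(k*r1) - J2(k*r1)*Y0(k*r2)]
# with J/Y the Bessel functions of the first/second kind; B_abs takes the magnitude
# of this ratio (times B0) and B_arg its phase.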
k_abs = lambda f: sqrt((2*np.pi*f*mu0*sigma_abs)/2)*(mpc(1,-1))
k_arg = lambda f: sqrt((2*np.pi*f*mu0*sigma_arg)/2)*(mpc(1,-1))
enum_abs = lambda f:(
besselj(0,k_abs(f)*r)
* bessely(2,k_abs(f)*r1)
- besselj(2,k_abs(f)*r1)
* bessely(0,k_abs(f)*r)
)
denom_abs = lambda f:(
besselj(0,k_abs(f)*r2)
* bessely(2,k_abs(f)*r1)
- besselj(2,k_abs(f)*r1)
* bessely(0,k_abs(f)*r2)
)
enum_arg = lambda f:(
besselj(0,k_arg(f)*r)
* bessely(2,k_arg(f)*r1)
- besselj(2,k_arg(f)*r1)
* bessely(0,k_arg(f)*r)
)
denom_arg = lambda f:(
besselj(0,k_arg(f)*r2)
* bessely(2,k_arg(f)*r1)
- besselj(2,k_arg(f)*r1)
* bessely(0,k_arg(f)*r2)
)
B_abs = lambda f: abs(enum_abs(f) / denom_abs(f) * B0)
B_arg = lambda f: arg(enum_arg(f) / denom_arg(f) * B0)
# ---------------------------------------------------------#
# Generate points for frequency axis #
# ---------------------------------------------------------#
n = np.linspace(1, npts, int(npts))
expufunc = np.frompyfunc(exp,1,1)
frequency_vector = fmin*expufunc(n*log(fmax-fmin)/npts)
# ---------------------------------------------------------#
# Numerically evaluate functions #
# ---------------------------------------------------------#
Babsufunc = np.frompyfunc(B_abs,1,1)
B_abs_num = Babsufunc(frequency_vector)
Bargufunc = np.frompyfunc(B_arg,1,1)
B_arg_num = Bargufunc(frequency_vector)
# ---------------------------------------------------------#
# Unfortunately, the arg() function only delivers values #
# between -pi and +pi for the angle of a complex number, #
# which, while correct, is not suitable for pretty #
# plotting, so we unwrap the phase to remove the jumps #
# and obtain a continuous curve. #
# ---------------------------------------------------------#
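# np.unwrap corrects jumps larger than pi by adding the appropriate multiple of 2*pi,
# e.g. [3.1, -3.1] becomes [3.1, 3.1832...].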
B_arg_num = np.unwrap(B_arg_num)
# ---------------------------------------------------------#
# Measurement Values from experiment #
# ---------------------------------------------------------#
frequencies_measured = np.array([ 1, 10, 20, 40, 80, 120, 160, 200, 400, 600, 800, 1000, 1200, 1500])
phases_degrees = np.array([ 2, 19.2, 35.2, 56.7, 76.7, 87, 94, 100, 121, 140, 155, 170, 180, 200])
voltages = np.array([ 7e-2, 6.6e-2, 5.78e-2, 4.18e-2, 2.44e-2, 1.69e-2, 1.27e-2, 1e-2, 4.8e-3, 2.9e-3, 1.9e-3, 1.4e-3, 1e-3, 7e-4])
# ---------------------------------------------------------#
# Scale values for improved legibility in plot #
# ---------------------------------------------------------#
B_abs_num = 1e3 * B_abs_num
voltages = 1e3 * voltages
B_arg_num = 180/np.pi*B_arg_num
# ---------------------------------------------------------#
# Plot the Things #
# ---------------------------------------------------------#
rc('text', usetex=True)
rc('font', family='serif')
fig = figure(1)
axes1 = fig.add_subplot(211)
axes1.plot(frequency_vector,B_abs_num,color=plot_color_fit,label=plot_label_fit)
axes1.scatter(frequencies_measured,
voltages,
color=plot_color_measurements,
s=plot_size_measurements,
label=plot_label_measurements
)
axes1.set_xlim([fmin*0.9,fmax*1.1])
axes1.set_xscale(plot_scale_x)
axes1.set_xlabel(plot_label_x,fontdict=font)
axes1.set_ylabel(plot_1_label_y,fontdict=font)
axes1.set_title(plot_1_title,fontdict=titlefont)
axes1.legend(fontsize=plot_legend_fontsize)
axes1.tick_params(labelsize=9)
axes2 = fig.add_subplot(212)
axes2.plot(frequency_vector,B_arg_num,color=plot_color_fit,label=plot_label_fit)
axes2.scatter(frequencies_measured,
-phases_degrees,
color=plot_color_measurements,
s=plot_size_measurements,
label=plot_label_measurements
)
axes2.set_xlim([fmin*0.9,fmax*1.1])
axes2.set_xscale(plot_scale_x)
axes2.set_xlabel(plot_label_x,fontdict=font)
axes2.set_ylabel(plot_2_label_y,fontdict=font)
axes2.set_title(plot_2_title,fontdict=titlefont)
axes2.legend(fontsize=plot_legend_fontsize)
axes2.tick_params(labelsize=9)
fig.subplots_adjust(bottom=0.1,left=0.1,right=0.9,top=0.95,hspace=0.5)
fig.savefig('plots-pgf/hollow--cu--freq--exact.pgf')
fig.savefig('plots-pdf/hollow--cu--freq--exact.pdf')
# ---------------------------------------------------------#
# Save listing to file #
# ---------------------------------------------------------#
dumpfile = open('listings/hollow--cu--freq--exact.tex', 'w')
table_opening = r"""
{%
\begin{center}
\captionof{table}{%
Parameter f\"ur Fit-Funktion in Abbildung~\ref{fig:cu:freq:exact}
}
\label{tab:fitparams:cu:freq:exact}
\sisetup{%
%math-rm=\mathtt,
scientific-notation=engineering,
table-format = +3.2e+2,
round-precision = 3,
round-mode = figures,
}
\begin{tabular}{lr}
\toprule
"""
table_closing = r"""
\bottomrule
\end{tabular}
\end{center}
}
"""
dumpfile.writelines(table_opening)
for line in params:
dumpfile.writelines(line)
dumpfile.writelines(table_closing)
dumpfile.close()
# ---------------------------------------------------------#
# Save Value of sigma to file for error analysis #
# ---------------------------------------------------------#
np.savetxt('numpy-txt/hollow--cu--freq--exact.txt',([sigma_abs,sigma_arg]))
|
|
from . import _dbg
from . import gs, gsq, sh
from .margo_agent import MargoAgent
from .margo_common import OutputLogger, TokenCounter, Mutex
from .margo_render import render
from .margo_state import State, actions, client_actions, Config, _view_scope_lang, view_is_9o, MgView
from base64 import b64decode
from collections import namedtuple
import glob
import os
import shlex
import sublime
import time
import webbrowser
class MargoSingleton(object):
def __init__(self):
self._ready = False
self.out = OutputLogger('margo')
self.agent_tokens = TokenCounter('agent', format='{}#{:03d}', start=6)
self.run_tokens = TokenCounter('9o.run')
self.agent = None
self.enabled_for_langs = ['*']
self.state = State()
self.status = []
self.output_handler = None
self._client_actions_handlers = {
client_actions.Activate: self._handle_act_activate,
client_actions.Restart: self._handle_act_restart,
client_actions.Shutdown: self._handle_act_shutdown,
client_actions.CmdOutput: self._handle_act_output,
client_actions.DisplayIssues: self._handle_DisplayIssues,
}
self.file_ids = []
self._hud_state = {}
self._hud_state_lock = Mutex(name='margo.MargoSingleton._hud_state_lock')
self.hud_name = 'GoSublime/HUD'
self.hud_id = self.hud_name.replace('/','-').lower()
self._views = {}
self._view_lock = Mutex(name='margo.MargoSingleton._view_lock')
self._gopath = ''
def _sync_settings(self):
old, new = self._gopath, sh.getenv('GOPATH')
if not new or new == old:
return
self._gopath = new
ag = self.agent
if not ag or new == ag.gopath:
return
self.out.println('Stopping agent. GOPATH changed: `%s` -> `%s`' % (ag.gopath, new))
self.stop(ag=ag)
def render(self, rs=None):
# ST has some locking issues due to its "thread-safe" API
# don't access things like sublime.active_view() directly
if rs:
for err in rs.state.errors:
self.out.println('Error: %s' % err)
self.state = rs.state
cfg = rs.state.config
self.enabled_for_langs = cfg.enabled_for_langs
if cfg.override_settings:
gs._mg_override_settings = cfg.override_settings
def _render():
render(
mg=mg,
view=gs.active_view(),
state=self.state,
status=self.status,
)
if rs:
self._handle_client_actions(rs)
if rs.agent and rs.agent is not self.agent:
rs.agent.stop()
sublime.set_timeout(_render)
def _handle_act_activate(self, rs, act):
gs.focus(act.name or act.path, row=act.row, col=act.col, focus_pat='')
def _handle_act_restart(self, rs, act):
self.restart()
def _handle_act_shutdown(self, rs, act):
self.stop()
def _handle_act_output(self, rs, act):
h = self.output_handler
if h:
h(rs, act)
def _handle_DisplayIssues(self, rs, act):
gs.active_view().run_command('margo_display_issues')
def _handle_client_actions(self, rs):
for act in rs.state.client_actions:
f = self._client_actions_handlers.get(act.action_name)
if f:
f(rs, act)
else:
self.out.println('Unknown client-action: %s: %s' % (act.action_name, act))
def render_status(self, *a):
self.status = list(a)
self.render()
def clear_status(self):
self.render_status()
def start(self):
self.restart()
def restart(self):
ag = self.agent
if ag:
gsq.dispatch('mg.restart-stop', ag.stop)
self.agent = MargoAgent(self)
self.agent.start()
def stop(self, ag=None):
if not ag or ag is self.agent:
ag, self.agent = self.agent, None
if ag:
ag.stop()
def enabled(self, view):
if not self._ready:
return False
if '*' in self.enabled_for_langs:
return True
_, lang = _view_scope_lang(view, 0)
return lang in self.enabled_for_langs
def can_trigger_event(self, view, allow_9o=False):
_pf=_dbg.pf()
if view is None:
return False
if view.is_loading():
return False
if not self.enabled(view):
return False
mgv = self.view(view.id(), view=view)
if allow_9o and mgv.is_9o:
return True
if not mgv.is_file:
return False
return True
def _gs_init(self):
self._sync_settings()
gs.sync_settings_callbacks.append(self._sync_settings)
for w in sublime.windows():
for v in w.views():
if v is not None:
self.view(v.id(), view=v)
self._ready = True
self.start()
def _hud_create_panel(self, win):
view = win.create_output_panel(self.hud_name)
if win == sublime.active_window():
win.focus_view(win.active_view())
syntax = gs.tm_path('hud')
settings = view.settings()
if settings.get('syntax') == syntax:
return view
view.set_syntax_file(syntax)
view.set_read_only(True)
view.set_name(self.hud_name)
opts = {
'line_numbers': False,
'gutter': False,
'margin': 0,
'highlight_line': False,
'rulers': [],
'fold_buttons': False,
'scroll_past_end': False,
}
settings.erase('color_scheme')
for k, v in opts.items():
settings.set(k, v)
return view
def is_hud_view(self, view):
if view is None:
return False
return view.settings().get('syntax') == gs.tm_path('hud')
def _hud_win_state(self, win):
default = (None, None)
if win is None:
return default
return self._hud_state.get(win.id()) or default
def hud_panel(self, win):
with self._hud_state_lock:
view, phantoms = self._hud_win_state(win)
wid = win.id()
m = self._hud_state
if view is None:
view = self._hud_create_panel(win)
m[wid] = (view, phantoms)
if phantoms is None:
phantoms = sublime.PhantomSet(view, self.hud_name)
m[wid] = (view, phantoms)
if len(m) > 1:
wids = [w.id() for w in sublime.windows()]
for id in list(m.keys()):
if id not in wids:
del m[id]
return (view, phantoms)
def view(self, id, view=None):
with self._view_lock:
mgv = self._views.get(id)
if view is not None:
if mgv is None:
mgv = MgView(mg=self, view=view)
self._views[mgv.id] = mgv
else:
mgv.sync(view=view)
return mgv
def _sync_view(self, event, view):
if event in ('pre_close', 'close'):
with self._view_lock:
self._views.pop(view.id(), None)
return
_pf=_dbg.pf(dot=event)
file_ids = []
for w in sublime.windows():
for v in w.views():
file_ids.append(v.id())
self.file_ids = file_ids
self.view(view.id(), view=view)
with self._view_lock:
m = self._views
self._views = {k: m[k] for k in set(file_ids).intersection(set(m.keys()))}
def event(self, name, view, handler, args):
if view is None:
return None
_pf=_dbg.pf(dot=name)
win = view.window()
if self.is_hud_view(view):
view = gs.active_view(win=win)
win.focus_view(view)
def handle_event(gt=0):
if gt > 0:
_pf.gt=gt
self._sync_view(name, view)
if not self.can_trigger_event(view):
return None
try:
return handler(*args)
except Exception:
gs.error_traceback('mg.event:%s' % handler)
return None
blocking = (
'pre_save',
'query_completions',
)
if name in blocking:
return handle_event(gt=0.100)
sublime.set_timeout(handle_event)
def _is_str(self, s):
return isinstance(s, str)
def _is_act(self, m):
return isinstance(m, dict) and self._is_str(m.get('Name'))
def _lst_of(self, l, f):
return isinstance(l, list) and l and len(list(filter(f, l))) == len(l)
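# navigate() below accepts three href forms (as handled in its body):
#   * plain http(s) URLs, which are opened in the browser;
#   * a `data:application/json;base64,` payload (or raw JSON) decoding to a single
#     {"Name": ...} action or a list of such actions, which are queued on the agent;
#   * a JSON list of strings, which is run as a 9o command via `gs9o_open`.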
def navigate(self, href, *, view=None, win=None):
if href.startswith('https://') or href.startswith('http://'):
gsq.launch('mg.navigate', lambda: webbrowser.open_new_tab(href))
return
dataPfx = 'data:application/json;base64,'
data = b64decode(href[len(dataPfx):]) if href.startswith(dataPfx) else href
view = gs.active_view(view=view, win=win)
x, err = gs.json_decode(data, None)
if self._is_act(x):
self.queue(actions=[x], view=view, delay=0.100)
elif self._lst_of(x, self._is_act):
self.queue(actions=x, view=view, delay=0.100)
elif self._lst_of(x, self._is_str):
view.window().run_command('gs9o_open', {'run': x, 'focus_view': False})
else:
self.out.println('mg.navigate: Invalid href `%s`, expected `http(s)://` or data:json`{Name: action}|[command args...]`, error: %s' % (href, err))
def agent_starting(self, ag):
if ag is not self.agent:
return
self.render_status('starting margo')
def agent_ready(self, ag):
if ag is not self.agent:
return
self.clear_status()
self.on_activated(gs.active_view())
def agent_stopped(self, ag):
if ag is not self.agent:
return
self.agent = None
self.clear_status()
def _send_start(self):
if not self.agent:
self.start()
def queue(self, *, actions=[], view=None, delay=-1):
self._send_start()
self.agent.queue(actions=actions, view=view, delay=delay)
def send(self, *, actions=[], cb=None, view=None):
self._send_start()
return self.agent.send(actions=actions, cb=cb, view=view)
def on_new(self, view):
pass
def on_pre_close(self, view):
pass
def on_query_completions(self, view, prefix, locations):
_, lang = _view_scope_lang(view, 0)
if not lang:
return None
act = actions.QueryCompletions
if lang == 'cmd-prompt':
act = self._cmd_completions_act(view, prefix, locations)
if not act:
return None
view = gs.active_view(win=view.window())
if view is None:
return None
rq = self.send(view=view, actions=[act])
rs = rq.wait(0.500)
if not rs:
self.out.println('aborting QueryCompletions. it did not respond in time')
return None
if rs.error:
self.out.println('completion error: %s: %s' % (act, rs.error))
return
if rs.state.view.src:
self._fmt_rs(
view=view,
event='query_completions',
rq=rq,
rs=rs,
)
cl = [c.entry() for c in rs.state.completions]
opts = rs.state.config.auto_complete_opts
return (cl, opts) if opts != 0 else cl
def _cmd_completions_act(self, view, prefix, locations):
pos = locations[0]
line = view.line(pos)
src = view.substr(line)
if '#' not in src:
return None
i = src.index('#')
while src[i] == ' ' or src[i] == '#':
i += 1
src = src[i:]
pos = pos - line.begin() - i
name = ''
args = shlex.split(src)
if args:
name = args[0]
args = args[1:]
act = actions.QueryCmdCompletions.copy()
act['Data'] = {
'Pos': pos,
'Src': src,
'Name': name,
'Args': args,
}
return act
def on_hover(self, view, pt, zone):
act = actions.QueryTooltips.copy()
row, col = view.rowcol(pt)
act['Data'] = {
'Row': row,
'Col': col,
}
self.queue(view=view, actions=[act])
def on_activated(self, view):
self.queue(view=view, actions=[actions.ViewActivated])
def on_modified(self, view):
self.queue(view=view, actions=[actions.ViewModified])
def on_selection_modified(self, view):
self.queue(view=view, actions=[actions.ViewPosChanged])
def fmt(self, view):
return self._fmt_save(view=view, actions=[actions.ViewFmt], event='fmt', timeout=5.000)
def on_pre_save(self, view):
return self._fmt_save(view=view, actions=[actions.ViewPreSave], event='pre_save', timeout=2.000)
def _fmt_save(self, *, view, actions, event, timeout):
rq = self.send(view=view, actions=actions)
rs = rq.wait(timeout)
self._fmt_rs(
view=view,
event=event,
rq=rq,
rs=rs,
)
def _fmt_rs(self, *, view, event, rq, rs):
id_nm = '%d: %s' % (view.id(), view.file_name() or view.name())
if not rs:
self.out.println('%s timedout on view %s' % (event, id_nm))
return
if rs.error:
self.out.println('%s error in view %s: %s' % (event, id_nm, rs.error))
return
req = rq.props.get('View', {})
res = rs.state.view
req_name, req_src = req.get('Name'), req.get('Src')
res_name, res_src = res.name, res.src
if not res_name or not res_src:
return
if req_name != res_name:
err = '\n'.join((
'PANIC!!! FMT REQUEST RECEIVED A RESPONSE TO ANOTHER VIEW',
'PANIC!!! THIS IS A BUG THAT SHOULD BE REPORTED ASAP',
))
self.out.println(err)
gs.show_output('mg.PANIC', err)
return
view.run_command('margo_render_src', {'src': res_src})
def on_post_save(self, view):
self.queue(view=view, actions=[actions.ViewSaved])
def on_load(self, view):
self.on_activated(view)
def example_extension_file(self):
return gs.dist_path('src/margo.sh/extension-example/extension-example.go')
def extension_file(self, install=False):
src_dir = gs.user_path('src', 'margo')
def ext_fn():
l = sorted(glob.glob('%s/*.go' % src_dir))
return l[0] if l else ''
fn = ext_fn()
if fn or not install:
return fn
try:
gs.mkdirp(src_dir)
with open('%s/margo.go' % src_dir, 'xb') as f:
s = open(self.example_extension_file(), 'rb').read()
f.write(s)
except FileExistsError:
pass
except Exception:
gs.error_traceback('mg.extension_file', status_txt='Cannot create default margo extension package')
return ext_fn()
mg = MargoSingleton()
def gs_init(_):
sublime.set_timeout(mg._gs_init)
def gs_fini(_):
mg.stop()
|
|
#!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
from os import makedirs, sys, remove, rename
from sys import path
import re, math, traceback
from copy import copy
from optparse import OptionParser, OptionGroup
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, printf, eprintf, exit_process, ShortenORFId
from libs.python_modules.utils.sysutil import getstatusoutput
from libs.python_modules.utils.errorcodes import error_message, get_error_list, insert_error
from libs.python_modules.utils.errorcodes import *
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed 'source MetaPathwaysrc' """)
print(""" """)
sys.exit(3)
usage= sys.argv[0] +" -d dbname1 -b blastout_for_database1 -m map_for_database1 [-d dbname2 -b blastout_for_database2 -m map_for_database2 ] """
parser = None
errorcode = 5
def createParser():
global parser
epilog = """This script parses BLAST/LAST search results of the amino acid sequences against the reference protein databases, in a tabular format. In the context of MetaPathways these files are available in the in the folder blast_results. The tabular results are put in individual files, one for each of the databases and algorithms combinations. This script parses these results and uses the hits based on the specified cutoffs for the evalue, bit score ratio, etc the parsed results are put in file named according to the format
<samplename><dbname><algorithm>out.parsed.txt. These parsed files are in a tabular format and each row contains information about the hits in terms of start, end, query name, match name, bit score ratio, etc."""
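# For reference, the parsed file written by process_blastoutput() below starts with a
# tab-separated header of the form (taxonomy column only with --tax):
#   #query  target  q_length  bitscore  bsr  expect  aln_length  identity  ec  [taxonomy]  product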
parser = OptionParser(usage, epilog= epilog)
parser.add_option("-b", "--blastoutput", dest="input_blastout", action='append', default=[],
help='the input blastout files [at least 1 REQUIRED]')
parser.add_option("-d", "--dbasename", dest="database_name", action='append', default=[],
help='the database names [at least 1 REQUIRED]')
parser.add_option("-o", "--parsedoutput", dest="parsed_output", default=None,
help='the parsed output file [OPTIONAL]')
parser.add_option("-r", "--ref_score", dest="refscore_file",
help='the refscore table [REQUIRED]')
parser.add_option("-m", "--map_for_database", dest="database_map", action='append', default=[],
help='the map file for the database [at least 1 REQUIRED]')
parser.add_option("-a", "--algorithm", dest="algorithm", choices = ['BLAST', 'LAST'], default = "BLAST",
help='the algorithm used for computing homology [DEFAULT: BLAST]')
cutoffs_group = OptionGroup(parser, 'Cutoff Related Options')
cutoffs_group.add_option("--min_score", dest="min_score", type='float', default=20,
help='the minimum bit score cutoff [default = 20 ] ')
cutoffs_group.add_option("--min_query_coverage", dest="min_query_coverage", type='float', default=0,
help='the minimum bit query_coverage cutoff [default = 0 ] ')
cutoffs_group.add_option("--max_evalue", dest="max_evalue", type='float', default=1e-6,
help='the maximum E-value cutoff [ default = 1e-6 ] ')
cutoffs_group.add_option("--min_length", dest="min_length", type='float', default=30,
help='the minimum length of query cutoff [default = 30 ] ')
cutoffs_group.add_option("--max_length", dest="max_length", type='float', default=10000,
help='the maximum length of query cutoff [default = 10000 ] ')
cutoffs_group.add_option("--min_identity", dest="min_identity", type='float', default=20,
help='the minimum identity of query cutoff [default = 20 ] ')
cutoffs_group.add_option("--max_identity", dest="max_identity", type='float', default=100,
help='the maximum identity of query cutoff [default = 100 ] ')
cutoffs_group.add_option("--max_gaps", dest="max_gaps", type='float', default=1000,
help='the maximum gaps of query cutoff [default = 1000] ')
cutoffs_group.add_option("--limit", dest="limit", type='float', default=5,
help='max number of hits per query cutoff [default = 5 ] ')
cutoffs_group.add_option("--min_bsr", dest="min_bsr", type='float', default=0.30,
help='minimum BIT SCORE RATIO [default = 0.30 ] ')
parser.add_option_group(cutoffs_group)
output_options_group = OptionGroup(parser, 'Output Options')
output_options_group.add_option("--tax", dest="taxonomy", action='store_true', default=False,
help='add the taxonomy info [useful for refseq] ')
output_options_group.add_option("--remove_tax", dest="remove_taxonomy", action='store_true', default=False,
help='removes the taxonomy from product [useful for refseq] ')
output_options_group.add_option("--remove_ec", dest="remove_ec", action='store_true', default=False,
help='removes the EC number from product [useful for kegg/metacyc] ')
output_options_group.add_option( "--compact_output", dest="compact_output", action='store_true', default=False,
help='compact output [OPTIONAL]')
parser.add_option_group(output_options_group)
bitscore_params = OptionGroup(parser, 'Bit Score Parameters')
bitscore_params.add_option("--lambda", dest="Lambda", default=None, type='float',
help='lambda parameter to compute bit score [useful for BSR] ')
bitscore_params.add_option("--k", dest="k", default=None, type='float',
help='k parameter to compute bit score [useful for BSR] ')
parser.add_option_group(bitscore_params)
def check_arguments(opts, args):
if len(opts.input_blastout) == 0:
print("There sould be at least one blastoutput file")
return False
if len(opts.database_name) == 0:
print("There sould be at least one database name")
return False
if len(opts.database_map) == 0:
print("There sould be at least one database map file name")
return False
if len(opts.input_blastout) != len(opts.database_name) or len(opts.input_blastout) != len(opts.database_map) :
print("The number of database names, blastoutputs and database map file should be equal")
return False
if opts.refscore_file == None:
print("Must specify the refscore")
return False
return True
def create_query_dictionary(blastoutputfile, query_dictionary, algorithm, errorlogger= None ):
seq_beg_pattern = re.compile("^#")
try:
blastoutfh = open( blastoutputfile,'r')
except:
print("ERROR : cannot open B/LAST output file " + blastoutputfile + " to parse ")
return
try:
for line in blastoutfh:
if not seq_beg_pattern.search(line):
words = line.rstrip().split('\t')
if len(words) != 12:
continue
if algorithm =='BLAST':
if not words[1] in query_dictionary:
query_dictionary[words[1]] = True
if algorithm =='LAST':
if not words[1] in query_dictionary:
query_dictionary[words[1]] = True
blastoutfh.close()
except:
eprintf("\nERROR : while reading B/LAST output file " + blastoutputfile + " to parse " +\
" : make sure B/LAST ing was done for the particular database")
if errorlogger:
errorlogger.write("\nERROR : while reading B/LAST output file %s to parse\n" %(blastoutputfile))
errorlogger.write(" : make sure B/LAST ing was done for the particular database\n")
pass
def create_dictionary(databasemapfile, annot_map, query_dictionary, errorlogger= None):
if not query_dictionary:
print("WARNING : empty query dictionary in parse B/LAST")
if errorlogger:
errologger.write("WARNING : empty query dictionary in parse B/LAST\n")
return
seq_beg_pattern = re.compile(">")
try:
dbmapfile = open( databasemapfile,'r')
except:
if errorlogger:
errologger.write("PARSE_BLAST\tERROR\tCannot open database map file %s\t Please check the file manuallyT\n" %(databasemapfile) )
exit_process("ERROR: Cannot open database map file %s\n" %(databasemapfile))
for line in dbmapfile:
if seq_beg_pattern.search(line):
words = line.rstrip().split()
name = words[0].replace('>','',1)
if not name in query_dictionary:
continue
words.pop(0)
if len(words)==0:
annotation = 'hypothetical protein'
else:
annotation = ' '.join(words)
annot_map[name] = annotation
dbmapfile.close()
if len(annot_map)==0:
if errorlogger:
errorlogger.write( "PARSE_BLAST\tERROR\tFile "+databasemapfile+ " seems to be empty!\tCreate datbasemap file\n")
errorlogger.write( "Try re-running after deleting file : %s\n" %(databasemapfile))
exit_process( "no anntations in file :" + databasemapfile)
class BlastOutputParser(object):
commentPATTERN = re.compile(r'^#')
commentLAST_VERSION_PATTERN = re.compile(r'^#.*LAST[\s]+version[\s]+\d+')
def create_refBitScores(self):
refscorefile = open(self.refscore_file,'r')
for line in refscorefile:
words =[ x.strip() for x in line.split('\t') ]
if len(words) == 2:
orfid = ShortenORFId(words[0])
try:
self.refBitScores[orfid]= int((self.Lambda*float(words[1]) - self.lnk )/self.ln2)
except:
self.refBitScores[orfid]= int(1)
refscorefile.close()
def __init__(self, dbname, blastoutput, database_mapfile, refscore_file, opts, errorlogger =None):
self.Size = 10000
self.dbname = dbname
self.ln2 = 0.69314718055994530941
self.lnk = math.log(opts.k)
self.Lambda = opts.Lambda
self.blastoutput = blastoutput
self.database_mapfile =database_mapfile
self.refscore_file = refscore_file
self.annot_map = {}
self.i=0
self.opts = opts
self.hits_counts = {}
self.data = {}
self.refscores = {}
self.refBitScores = {}
self.needToPermute = False
self.MAX_READ_ERRORS_ALLOWED = 10
self.ERROR_COUNT = 0
self.STEP_NAME = 'PARSE_BLAST'
self.error_and_warning_logger = errorlogger
#print "trying to open blastoutput file " + blastoutput
query_dictionary = {}
try:
create_query_dictionary(self.blastoutput, query_dictionary, self.opts.algorithm, errorlogger = errorlogger)
except:
insert_error(5)
try:
self.blastoutputfile = open(self.blastoutput,'r')
except:
eprintf("\nERROR : cannot open B/LAST output file " + blastoutput + " to parse "+\
" : make sure \"B/LAST\"ing was done for the particular database" )
if self.error_and_warning_logger:
self.error_and_warning_logger.write("ERROR : cannot open B/LAST output file %s %s to parse \n" +\
" : make sure \"B/LAST\"ing was done for "+\
"the particular database" %(blastoutput) )
insert_error(5)
exit_process( "Cannot open B/LAST output file " + blastoutput )
try:
self.create_refBitScores()
except:
print(traceback.print_exc(10))
exit_process( "Error while reading from B/LAST refscore file " + self.refscore_file )
try:
create_dictionary(database_mapfile, self.annot_map, query_dictionary)
query_dictionary = {}
except AttributeError:
eprintf("Cannot read the map file for database : %s\n" % (dbname))
if errorlogger!= None:
errorlogger.write("PARSE_BLAST\tERROR\tCannot read the map file %s for database : %s\tDelete the formatted files for the database in the \"formatted\" folder\n" %(database_mapfile, dbname))
exit_process("Cannot read the map file for database " + dbname)
def setMaxErrorsLimit(self, max):
self.MAX_READ_ERRORS_ALLOWED = max
def setErrorAndWarningLogger(self, logger):
self.error_and_warning_logger = logger
def setSTEP_NAME(self, step_name):
self.STEP_NAME = step_name
def incErrorCount(self):
self.ERROR_COUNT += 1
def maxErrorsReached(self):
return (self.ERROR_COUNT > self.MAX_READ_ERRORS_ALLOWED)
def __iter__(self):
return self
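# The remapping in permuteForLAST() converts a LAST tabular row into the BLAST -m8
# column order used by isWithinCutoffs(): the 7th input column becomes the query id,
# the 2nd the target, the 1st (the LAST score) is reused as the bit score, percent
# identity is set to 100 and the e-value to 0.0. This assumes LAST's default tabular
# output layout; BLAST rows are left untouched.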
def permuteForLAST(self, words):
try :
temp = copy(words)
words[0] = temp[6] # query
words[1] = temp[1] # target
words[2] = 100.0 # percent id
words[3] = temp[3] #aln length
words[6] = temp[2]
words[7] = int(temp[2]) + int(temp[3]) - 1
words[10] = 0.0 # evalue
words[11] = temp[0]
except:
eprintf("ERROR : Invalid B/LAST output file %s \n" % (self.blastoutput))
if self.error_and_warning_logger:
self.error_and_warning_logger.write("ERROR : Invalid B/LAST output file" %(self.blastoutput))
exit_process( "ERROR : Invalid B/LAST output file %s " % (self.blastoutput))
def refillBuffer(self):
i = 0
self.lines = []
line = True # self.blastoutputfile.readline()
while line and i < self.Size:
line=self.blastoutputfile.readline()
if self.commentPATTERN.match(line):
if self.commentLAST_VERSION_PATTERN.match(line):
self.needToPermute = True
continue
self.lines.append(line)
if not line:
break
i += 1
self.size = len(self.lines)
def __next__(self):
if self.i % self.Size ==0:
self.refillBuffer()
if self.i % self.Size < self.size:
words = [ x.strip() for x in self.lines[self.i % self.Size].rstrip().split('\t')]
if len(words) != 12:
self.i = self.i + 1
return None
'''shorten the ORF id'''
words[0] = ShortenORFId(words[0])
#if self.opts.algorithm =='LAST':
if self.needToPermute:
self.permuteForLAST(words)
if not words[0] in self.hits_counts:
self.hits_counts[words[0]] = 0
if self.hits_counts[words[0]] >= self.opts.limit:
self.i = self.i + 1
return None
if len(words) != 12 or not self.isWithinCutoffs(words, self.data, self.opts, self.annot_map, self.refBitScores):
self.i = self.i + 1
return None
self.hits_counts[words[0]] += 1
self.i = self.i + 1
try:
return self.data
except:
return None
else:
self.blastoutputfile.close()
raise StopIteration()
def isWithinCutoffs(self, words, data, cutoffs, annot_map, refbitscores):
try:
orfid = ShortenORFId(words[0])
except:
orfid = words[0]
data['query'] = orfid
try:
data['target'] = words[1]
except:
data['target'] = 0
try:
data['q_length'] = int(words[7]) - int(words[6]) + 1
except:
data['q_length'] = 0
try:
data['bitscore'] = float(words[11])
except:
data['bitscore'] = 0
try:
data['bsr'] = float(words[11])/refbitscores[orfid]
except:
#print "words 0 " + str(refscores[words[0]])
#print "words 11 " + str( words[11])
data['bsr'] = 0
try:
data['expect'] = float(words[10])
except:
data['expect'] = 0
try:
data['aln_length'] = float(words[3])
except:
data['aln_length'] = 0
try:
data['identity'] = float(words[2])
except:
data['identity'] = 0
try:
data['product'] = annot_map[words[1]]
except:
eprintf("Sequence with name \"" + words[1] + "\" is not present in map file\n")
if self.error_and_warning_logger:
self.error_and_warning_logger.write("Sequence with name %s is not present in map file " %(words[1] ))
self.incErrorCount()
if self.maxErrorsReached():
if self.error_and_warning_logger:
self.error_and_warning_logger.write("Number of sequence absent in map file %s exceeds %d" %(self.blastoutput, self.ERROR_COUNT ))
exit_process("Number of sequence absent in map file %s exceeds %d" %(self.blastoutput, self.ERROR_COUNT ))
data['product'] = 'hypothetical protein'
try:
m = re.search(r'(\d+[.]\d+[.]\d+[.]\d+)', data['product'])
if m != None:
data['ec'] = m.group(0)
else:
data['ec'] = ''
except:
data['ec'] = ''
if cutoffs.taxonomy:
try:
m = re.search(r'\[([^\[]+)\]', data['product'])
if m != None:
data['taxonomy'] = m.group(1)
else:
data['taxonomy'] = ''
except:
data['taxonomy'] = ''
if cutoffs.remove_taxonomy:
try:
data['product'] = re.sub(r'\[([^\[]+)\]','', data['product'])
except:
data['product'] = ''
if cutoffs.remove_ec:
try:
data['product'] = re.sub(r'\([Ee][Ce][:]\d+[.]\d+[.]\d+[.]\d+\)', '', data['product'])
data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.]\d+[.]\d+[.]\d+\]', '', data['product'])
data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.]\d+[.]\d+[.-]\]', '', data['product'])
data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.]\d+[.-.-]\]', '', data['product'])
data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.-.-.-]\]', '', data['product'])
except:
data['product'] = ''
if data['q_length'] < cutoffs.min_length:
return False
if data['bitscore'] < cutoffs.min_score:
return False
if data['expect'] > cutoffs.max_evalue:
return False
if data['identity'] < cutoffs.min_identity:
return False
if data['bsr'] < cutoffs.min_bsr:
return False
#min_length'
#'min_score'
#'max_evalue'
# 'min_identity'
#'limit'
#'max_length'
#'min_query_coverage'
#'max_gaps'
#min_bsr'
return True
# parse the B/LAST output for one database and write the parsed hits
def process_blastoutput(dbname, blastoutput, mapfile, refscore_file, opts, errorlogger = None):
blastparser = BlastOutputParser(dbname, blastoutput, mapfile, refscore_file, opts, errorlogger = errorlogger)
blastparser.setMaxErrorsLimit(100)
blastparser.setErrorAndWarningLogger(errorlogger)
blastparser.setSTEP_NAME('PARSE BLAST')
fields = ['target','q_length', 'bitscore', 'bsr', 'expect', 'aln_length', 'identity', 'ec' ]
if opts.taxonomy:
fields.append('taxonomy')
fields.append('product')
output_blastoutput_parsed = opts.parsed_output
# temporary file is used to deal with incomplete processing of the file
output_blastoutput_parsed_tmp = output_blastoutput_parsed + ".tmp"
try:
outputfile = open(output_blastoutput_parsed_tmp, 'w')
except:
if errorlogger:
errorlogger.write("PARSE_BLAST\tERROR\tCannot open temp file %s to sort\tfor reference db\n" %(soutput_blastoutput_parsed_tmp, dbname))
exit_process("PARSE_BLAST\tERROR\tCannot open temp file %s to sort\tfor reference db\n" %(soutput_blastoutput_parsed_tmp, dbname))
# write the headers out
fprintf(outputfile, "#%s",'query')
for field in fields:
fprintf(outputfile,"\t%s",field)
fprintf(outputfile, "\n")
pattern = re.compile(r'(\d+_\d+)$')
count = 0
uniques = {}
for data in blastparser:
if not data:
continue
try:
fprintf(outputfile, "%s",data['query'])
result = pattern.search(data['query'])
if result:
name = result.group(1)
uniques[name] =True
except:
print('data is : ', data, '\n')
return count, len(uniques)
for field in fields:
fprintf(outputfile, "\t%s",data[field])
fprintf(outputfile, "\n")
count += 1
outputfile.close()
rename(output_blastoutput_parsed_tmp, output_blastoutput_parsed)
return count, len(uniques)
# the main function
def main(argv, errorlogger = None, runstatslogger = None):
global parser
(opts, args) = parser.parse_args(argv)
if not check_arguments(opts, args):
print(usage)
sys.exit(0)
if errorlogger:
errorlogger.write("#STEP\tPARSE_BLAST\n")
if opts.Lambda == None or opts.k == None:
if opts.algorithm=='LAST':
opts.Lambda = 0.300471
opts.k = 0.103946
if opts.algorithm=='BLAST':
opts.Lambda = 0.267
opts.k = 0.0410
dictionary={}
priority = 5000
priority1 = 5500
for dbname, blastoutput, mapfile in zip( opts.database_name, opts.input_blastout, opts.database_map):
temp_refscore = ""
temp_refscore = opts.refscore_file
if opts.parsed_output==None:
opts.parsed_output = blastoutput + ".parsed.txt"
count, unique_count = process_blastoutput(dbname, blastoutput, mapfile, temp_refscore, opts, errorlogger = errorlogger)
if runstatslogger:
runstatslogger.write("%s\tTotal Protein Annotations %s (%s)\t%s\n" %( str(priority), dbname, opts.algorithm, str(count)))
runstatslogger.write("%s\tNumber of ORFs with hits in %s (%s)\t%s\n" %( str(priority1), dbname, opts.algorithm, str(unique_count)))
def MetaPathways_parse_blast(argv, errorlogger = None, runstatslogger = None):
createParser()
try:
main(argv, errorlogger = errorlogger, runstatslogger = runstatslogger)
except:
insert_error(5)
return (0,'')
return (0,'')
# the main function of metapaths
if __name__ == "__main__":
createParser()
main(sys.argv[1:])
|
|
import os
import sys
import virtualenv
import py
import pytest
import pytest_cov
pytest_plugins = 'pytester', 'cov'
SCRIPT = '''
import sys
def pytest_generate_tests(metafunc):
for i in range(10):
metafunc.addcall()
def test_foo():
assert True
if sys.version_info[0] > 5:
assert False
'''
SCRIPT_CHILD = '''
import sys
idx = int(sys.argv[1])
if idx == 0:
pass
if idx == 1:
pass
'''
SCRIPT_PARENT = '''
import subprocess
import sys
def pytest_generate_tests(metafunc):
for i in range(2):
metafunc.addcall(funcargs=dict(idx=i))
def test_foo(idx):
out, err = subprocess.Popen(
[sys.executable, 'child_script.py', str(idx)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
# there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_FUNCARG = '''
import coverage
def test_foo(cov):
assert isinstance(cov, coverage.control.coverage)
'''
SCRIPT_FUNCARG_NOT_ACTIVE = '''
def test_foo(cov):
assert cov is None
'''
MULTIPROCESSING_SCRIPT = '''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
'''
SCRIPT_FAIL = '''
def test_fail():
assert False
'''
SCRIPT_RESULT = '8 * 88%'
CHILD_SCRIPT_RESULT = '6 * 100%'
PARENT_SCRIPT_RESULT = '8 * 100%'
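# The *_RESULT strings above are fnmatch patterns against coverage.py's terminal
# report columns, read here as '<statements> * <percent covered>': e.g. SCRIPT is
# expected to report 8 statements at 88% coverage, since its version-check branch
# is never taken.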
def test_central(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central * %s *' % SCRIPT_RESULT,
'*10 passed*'
])
assert result.ret == 0
def test_no_cov_on_fail(testdir):
script = testdir.makepyfile(SCRIPT_FAIL)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--no-cov-on-fail',
script)
assert 'coverage: platform' not in result.stdout.str()
result.stdout.fnmatch_lines(['*1 failed*'])
def test_dist_collocated(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_collocated * %s *' % SCRIPT_RESULT,
'*10 passed*'
])
assert result.ret == 0
def test_dist_not_collocated(testdir):
script = testdir.makepyfile(SCRIPT)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated * %s *' % SCRIPT_RESULT,
'*10 passed*'
])
assert result.ret == 0
def test_central_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script * %s *' % CHILD_SCRIPT_RESULT,
'parent_script * %s *' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_dist_subprocess_collocated(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script * %s *' % CHILD_SCRIPT_RESULT,
'parent_script * %s *' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_dist_subprocess_not_collocated(testdir, tmpdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
child_script = scripts.dirpath().join('child_script.py')
dir1 = tmpdir.mkdir('dir1')
dir2 = tmpdir.mkdir('dir2')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % child_script,
'--rsyncdir=%s' % parent_script,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script * %s *' % CHILD_SCRIPT_RESULT,
'parent_script * %s *' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_empty_report(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=non_existent_module',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*10 passed*'
])
assert result.ret == 0
matching_lines = [line for line in result.outlines if '%' in line]
assert not matching_lines
def test_dist_missing_data(testdir):
venv_path = os.path.join(str(testdir.tmpdir), 'venv')
virtualenv.create_environment(venv_path)
if sys.platform == 'win32':
exe = os.path.join(venv_path, 'Scripts', 'python.exe')
else:
exe = os.path.join(venv_path, 'bin', 'python')
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//python=%s' % exe,
script)
result.stdout.fnmatch_lines([
'*- coverage: failed slaves -*'
])
assert result.ret == 0
def test_funcarg(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_funcarg * 3 * 100%*',
'*1 passed*'
])
assert result.ret == 0
def test_funcarg_not_active(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG_NOT_ACTIVE)
result = testdir.runpytest('-v',
script)
result.stdout.fnmatch_lines([
'*1 passed*'
])
assert result.ret == 0
def test_multiprocessing_subprocess(testdir):
py.test.importorskip('multiprocessing.util')
script = testdir.makepyfile(MULTIPROCESSING_SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_subprocess * 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
MODULE = '''
def func():
return 1
'''
CONFTEST = '''
import mod
mod.func()
'''
BASIC_TEST = '''
def test_basic():
assert True
'''
CONF_RESULT = 'mod * 2 * 100% *'
def test_cover_conftest(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
def test_cover_conftest_dist(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
COVERAGERC = '''
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
raise NotImplementedError
'''
EXCLUDED_TEST = '''
def func():
raise NotImplementedError
def test_basic():
assert True
'''
EXCLUDED_RESULT = '3 * 100% *'
def test_coveragerc(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['test_coveragerc * %s' % EXCLUDED_RESULT])
def test_coveragerc_dist(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '2',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(
['test_coveragerc_dist * %s' % EXCLUDED_RESULT])
CLEAR_ENVIRON_TEST = '''
import os
def test_basic():
os.environ.clear()
'''
def test_clear_environ(testdir):
script = testdir.makepyfile(CLEAR_ENVIRON_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
SCRIPT_SIMPLE = '''
def test_foo():
assert 1 == 1
assert True
'''
SCRIPT_SIMPLE_RESULT = '3 * 100%'
@pytest.mark.skipif('sys.platform == "win32"')
def test_dist_boxed(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--boxed',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_boxed * %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_not_started_plugin_does_not_fail(testdir):
plugin = pytest_cov.CovPlugin(None, None, start=False)
plugin.pytest_sessionfinish(None, None)
plugin.pytest_terminal_summary(None)
|
|
__author__ = 'Georgios Rizos ([email protected])'
import time
import numpy as np
from scipy.sparse import issparse
from sklearn.multiclass import OneVsRestClassifier
from sklearn import svm
from sklearn.preprocessing import normalize
from reveal_graph_embedding.datautil.snow_datautil import snow_read_data
from reveal_graph_embedding.datautil.asu_datautil import asu_read_data
from reveal_graph_embedding.datautil.insight_datautil import insight_read_data
from reveal_graph_embedding.embedding.arcte.arcte import arcte
from reveal_graph_embedding.embedding.competing_methods import laplacian_eigenmaps, replicator_eigenmaps, louvain,\
mroc, base_communities
from reveal_graph_embedding.embedding.common import normalize_columns
from reveal_graph_embedding.learning.holdout import generate_folds
from reveal_graph_embedding.embedding.community_weighting import chi2_contingency_matrix,\
peak_snr_weight_aggregation, community_weighting
from reveal_graph_embedding.learning import evaluation
def run_experiment(dataset_name,
dataset_folder,
feature_extraction_method_name,
percentages,
trial_num,
thread_num,
feature_extraction_parameters,
classifier_parameters):
if dataset_name == "snow2014":
adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories = read_snow2014graph_data(dataset_folder)
elif dataset_name == "flickr":
adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories = read_asu_data(dataset_folder)
elif dataset_name == "youtube":
adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories = read_asu_data(dataset_folder)
elif dataset_name == "politicsuk":
adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories = read_insight_data(dataset_folder)
else:
print("Invalid dataset name.")
raise RuntimeError
print("Graphs and labels read.")
feature_matrix,\
feature_extraction_elapsed_time = feature_extraction(adjacency_matrix,
feature_extraction_method_name,
thread_num,
feature_extraction_parameters)
print("Feature extraction elapsed time: ", feature_extraction_elapsed_time)
if feature_extraction_parameters["community_weighting"] is None:
pass
elif feature_extraction_parameters["community_weighting"] == "chi2":
feature_matrix = normalize_columns(feature_matrix)
elif feature_extraction_parameters["community_weighting"] == "ivf":
feature_matrix = normalize_columns(feature_matrix)
else:
print("Invalid community weighting selection.")
raise RuntimeError
C = classifier_parameters["C"]
fit_intercept = classifier_parameters["fit_intercept"]
for p in np.arange(percentages.size):
percentage = percentages[p]
# Initialize the metric storage arrays to zero
macro_F1 = np.zeros(trial_num, dtype=np.float64)
micro_F1 = np.zeros(trial_num, dtype=np.float64)
folds = generate_folds(node_label_matrix,
labelled_node_indices,
number_of_categories,
percentage,
trial_num)
for trial in np.arange(trial_num):
train, test = next(folds)
########################################################################################################
# Separate train and test sets
########################################################################################################
X_train, X_test, y_train, y_test = feature_matrix[train, :],\
feature_matrix[test, :],\
node_label_matrix[train, :],\
node_label_matrix[test, :]
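            # For sparse community features, optionally re-weight the feature
            # columns with a chi-square contingency statistic computed on the
            # training fold (aggregated via peak SNR); otherwise fall back to
            # row-wise l2 normalization of the train/test matrices.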
if issparse(feature_matrix):
if feature_extraction_parameters["community_weighting"] == "chi2":
contingency_matrix = chi2_contingency_matrix(X_train, y_train)
community_weights = peak_snr_weight_aggregation(contingency_matrix)
X_train, X_test = community_weighting(X_train, X_test, community_weights)
else:
X_train = normalize(X_train, norm="l2")
X_test = normalize(X_test, norm="l2")
############################################################################################################
# Train model
############################################################################################################
# Train classifier.
start_time = time.time()
model = OneVsRestClassifier(svm.LinearSVC(C=C,
random_state=None,
dual=False,
fit_intercept=fit_intercept),
n_jobs=thread_num)
model.fit(X_train, y_train)
hypothesis_training_time = time.time() - start_time
print('Model fitting time: ', hypothesis_training_time)
############################################################################################################
# Make predictions
############################################################################################################
start_time = time.time()
y_pred = model.decision_function(X_test)
prediction_time = time.time() - start_time
print('Prediction time: ', prediction_time)
############################################################################################################
# Calculate measures
############################################################################################################
y_pred = evaluation.form_node_label_prediction_matrix(y_pred, y_test)
measures = evaluation.calculate_measures(y_pred, y_test)
macro_F1[trial] = measures[4]
micro_F1[trial] = measures[5]
# print('Trial ', trial+1, ':')
# print(' Macro-F1: ', macro_F1[trial])
# print(' Micro-F1: ', micro_F1[trial])
# print('\n')
################################################################################################################
# Experiment results
################################################################################################################
print(percentage)
print('\n')
print('Macro F1 average: ', np.mean(macro_F1))
print('Micro F1 average: ', np.mean(micro_F1))
print('Macro F1 std: ', np.std(macro_F1))
print('Micro F1 std: ', np.std(micro_F1))
def read_snow2014graph_data(dataset_folder):
adjacency_matrix = snow_read_data.read_adjacency_matrix(file_path=dataset_folder + "/men_ret_graph.tsv",
separator="\t")
node_label_matrix,\
labelled_node_indices,\
number_of_categories = snow_read_data.read_node_label_matrix(file_path=dataset_folder + "/user_label_matrix.tsv",
separator="\t")
return adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories
def read_asu_data(dataset_folder):
adjacency_matrix = asu_read_data.read_adjacency_matrix(file_path=dataset_folder + "/edges.csv",
separator=",")
node_label_matrix,\
labelled_node_indices,\
number_of_categories = asu_read_data.read_node_label_matrix(file_path=dataset_folder + "/group-edges.csv",
separator=",",
number_of_nodes=adjacency_matrix.shape[0])
return adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories
def read_insight_data(dataset_folder):
adjacency_matrix = insight_read_data.read_adjacency_matrix(file_path=dataset_folder + "/men_ret_graph.tsv",
separator="\t")
node_label_matrix,\
labelled_node_indices,\
number_of_categories = insight_read_data.read_node_label_matrix(file_path=dataset_folder + "/user_label_matrix.tsv",
separator="\t")
return adjacency_matrix,\
node_label_matrix,\
labelled_node_indices,\
number_of_categories
def feature_extraction(adjacency_matrix,
feature_extraction_method_name,
thread_num,
feature_extraction_parameters):
start_time = time.time()
if feature_extraction_method_name == "arcte":
epsilon = feature_extraction_parameters["epsilon"]
rho = feature_extraction_parameters["rho"]
feature_matrix = arcte(adjacency_matrix, rho, epsilon, thread_num)
elif feature_extraction_method_name == "mroc":
alpha = feature_extraction_parameters["alpha"]
feature_matrix = mroc(adjacency_matrix, alpha)
elif feature_extraction_method_name == "louvain":
feature_matrix = louvain(adjacency_matrix)
elif feature_extraction_method_name == "basecomm":
feature_matrix = base_communities(adjacency_matrix)
elif feature_extraction_method_name == "lapeig":
dimensionality = feature_extraction_parameters["dimensionality"]
feature_matrix = laplacian_eigenmaps(adjacency_matrix, dimensionality)
elif feature_extraction_method_name == "repeig":
dimensionality = feature_extraction_parameters["dimensionality"]
feature_matrix = replicator_eigenmaps(adjacency_matrix, dimensionality)
else:
print("Invalid feature extraction name.")
raise RuntimeError
elapsed_time = time.time() - start_time
return feature_matrix, elapsed_time
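

# A minimal, hypothetical invocation sketch (not part of the original module);
# it only illustrates the shape of the parameter dictionaries that
# run_experiment and feature_extraction read above. The dataset folder and the
# numeric values are placeholders.
def _example_invocation():
    run_experiment(dataset_name="snow2014",
                   dataset_folder="/path/to/snow2014",
                   feature_extraction_method_name="arcte",
                   percentages=np.array([1, 5, 10]),
                   trial_num=10,
                   thread_num=4,
                   feature_extraction_parameters={"epsilon": 0.00001,
                                                  "rho": 0.2,
                                                  "community_weighting": "chi2"},
                   classifier_parameters={"C": 1.0, "fit_intercept": True})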
|
|
# Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Jason Narad <[email protected]>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
from __future__ import print_function
import math
import logging
from six.moves import range
from nltk.parse.dependencygraph import DependencyGraph
logger = logging.getLogger(__name__)
#################################################################
# DependencyScorerI - Interface for Graph-Edge Weight Calculation
#################################################################
class DependencyScorerI(object):
"""
    A scorer for calculating the weights on the edges of a weighted
dependency graph. This is used by a
``ProbabilisticNonprojectiveParser`` to initialize the edge
weights of a ``DependencyGraph``. While typically this would be done
by training a binary classifier, any class that can return a
multidimensional list representation of the edge weights can
implement this interface. As such, it has no necessary
fields.
"""
def __init__(self):
if self.__class__ == DependencyScorerI:
raise TypeError('DependencyScorerI is an abstract interface')
def train(self, graphs):
"""
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
Typically the edges present in the graphs can be used as
positive training examples, and the edges not present as negative
examples.
"""
raise NotImplementedError()
def score(self, graph):
"""
:type graph: DependencyGraph
:param graph: A dependency graph whose set of edges need to be
scored.
:rtype: A three-dimensional list of numbers.
        :return: The score is returned in a three-dimensional list, such
that the outer-dimension refers to the head, and the
inner-dimension refers to the dependencies. For instance,
scores[0][1] would reference the list of scores corresponding to
arcs from node 0 to node 1. The node's 'address' field can be used
to determine its number identification.
For further illustration, a score list corresponding to Fig.2 of
Keith Hall's 'K-best Spanning Tree Parsing' paper:
scores = [[[], [5], [1], [1]],
[[], [], [11], [4]],
[[], [10], [], [5]],
[[], [8], [8], []]]
When used in conjunction with a MaxEntClassifier, each score would
correspond to the confidence of a particular edge being classified
with the positive training examples.
"""
raise NotImplementedError()
#################################################################
# NaiveBayesDependencyScorer
#################################################################
class NaiveBayesDependencyScorer(DependencyScorerI):
"""
A dependency scorer built around a MaxEnt classifier. In this
particular class that classifier is a ``NaiveBayesClassifier``.
It uses head-word, head-tag, child-word, and child-tag features
for classification.
>>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2
>>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train(graphs, NaiveBayesDependencyScorer())
>>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
>>> len(list(parses))
1
"""
def __init__(self):
pass # Do nothing without throwing error
def train(self, graphs):
"""
Trains a ``NaiveBayesClassifier`` using the edges present in
graphs list as positive examples, the edges not present as
negative examples. Uses a feature vector of head-word,
head-tag, child-word, and child-tag.
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
"""
from nltk.classify import NaiveBayesClassifier
        # Create labeled training examples
labeled_examples = []
for graph in graphs:
for head_node in graph.nodes.values():
for child_index, child_node in graph.nodes.items():
if child_index in head_node['deps']:
label = "T"
else:
label = "F"
labeled_examples.append(
(
dict(
a=head_node['word'],
b=head_node['tag'],
c=child_node['word'],
d=child_node['tag'],
),
label,
)
)
self.classifier = NaiveBayesClassifier.train(labeled_examples)
def score(self, graph):
"""
Converts the graph into a feature-based representation of
each edge, and then assigns a score to each based on the
confidence of the classifier in assigning it to the
positive label. Scores are returned in a multidimensional list.
:type graph: DependencyGraph
:param graph: A dependency graph to score.
:rtype: 3 dimensional list
:return: Edge scores for the graph parameter.
"""
# Convert graph to feature representation
edges = []
for head_node in graph.nodes.values():
for child_node in graph.nodes.values():
edges.append(
(
dict(
a=head_node['word'],
b=head_node['tag'],
c=child_node['word'],
d=child_node['tag'],
)
)
)
# Score edges
edge_scores = []
row = []
count = 0
for pdist in self.classifier.prob_classify_many(edges):
logger.debug('%.4f %.4f', pdist.prob('T'), pdist.prob('F'))
# smoothing in case the probability = 0
row.append([math.log(pdist.prob("T")+0.00000000001)])
count += 1
if count == len(graph.nodes):
edge_scores.append(row)
row = []
count = 0
return edge_scores
#################################################################
# A Scorer for Demo Purposes
#################################################################
# A short class necessary to show parsing example from paper
class DemoScorer(DependencyScorerI):
def train(self, graphs):
print('Training...')
def score(self, graph):
# scores for Keith Hall 'K-best Spanning Tree Parsing' paper
return [[[], [5], [1], [1]],
[[], [], [11], [4]],
[[], [10], [], [5]],
[[], [8], [8], []]]
#################################################################
# Non-Projective Probabilistic Parsing
#################################################################
class ProbabilisticNonprojectiveParser(object):
"""A probabilistic non-projective dependency parser.
    Nonprojective dependencies allow for "crossing branches" in the parse tree
which is necessary for representing particular linguistic phenomena, or even
typical parses in some languages. This parser follows the MST parsing
algorithm, outlined in McDonald(2005), which likens the search for the best
non-projective parse to finding the maximum spanning tree in a weighted
directed graph.
>>> class Scorer(DependencyScorerI):
... def train(self, graphs):
... pass
...
... def score(self, graph):
... return [
... [[], [5], [1], [1]],
... [[], [], [11], [4]],
... [[], [10], [], [5]],
... [[], [8], [8], []],
... ]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train([], Scorer())
>>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None])
>>> len(list(parses))
1
Rule based example
------------------
>>> from nltk.grammar import DependencyGrammar
>>> grammar = DependencyGrammar.fromstring('''
... 'taught' -> 'play' | 'man'
... 'man' -> 'the' | 'in'
... 'in' -> 'corner'
... 'corner' -> 'the'
... 'play' -> 'golf' | 'dachshund' | 'to'
... 'dachshund' -> 'his'
... ''')
>>> ndp = NonprojectiveDependencyParser(grammar)
>>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
>>> len(list(parses))
4
"""
def __init__(self):
"""
Creates a new non-projective parser.
"""
logging.debug('initializing prob. nonprojective...')
def train(self, graphs, dependency_scorer):
"""
Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects,
and establishes this as the parser's scorer. This is used to
initialize the scores on a ``DependencyGraph`` during the parsing
procedure.
:type graphs: list(DependencyGraph)
:param graphs: A list of dependency graphs to train the scorer.
:type dependency_scorer: DependencyScorerI
:param dependency_scorer: A scorer which implements the
``DependencyScorerI`` interface.
"""
self._scorer = dependency_scorer
self._scorer.train(graphs)
def initialize_edge_scores(self, graph):
"""
Assigns a score to every edge in the ``DependencyGraph`` graph.
These scores are generated via the parser's scorer which
was assigned during the training process.
:type graph: DependencyGraph
:param graph: A dependency graph to assign scores to.
"""
self.scores = self._scorer.score(graph)
def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):
"""
Takes a list of nodes that have been identified to belong to a cycle,
        and collapses them into one larger node. The arcs of all nodes in
the graph must be updated to account for this.
:type new_node: Node.
:param new_node: A Node (Dictionary) to collapse the cycle nodes into.
:type cycle_path: A list of integers.
:param cycle_path: A list of node addresses, each of which is in the cycle.
:type g_graph, b_graph, c_graph: DependencyGraph
:param g_graph, b_graph, c_graph: Graphs which need to be updated.
"""
logger.debug('Collapsing nodes...')
# Collapse all cycle nodes into v_n+1 in G_Graph
for cycle_node_index in cycle_path:
g_graph.remove_by_address(cycle_node_index)
g_graph.add_node(new_node)
g_graph.redirect_arcs(cycle_path, new_node['address'])
def update_edge_scores(self, new_node, cycle_path):
"""
Updates the edge scores to reflect a collapse operation into
new_node.
:type new_node: A Node.
:param new_node: The node which cycle nodes are collapsed into.
:type cycle_path: A list of integers.
:param cycle_path: A list of node addresses that belong to the cycle.
"""
logger.debug('cycle %s', cycle_path)
cycle_path = self.compute_original_indexes(cycle_path)
logger.debug('old cycle %s', cycle_path)
logger.debug('Prior to update: %s', self.scores)
for i, row in enumerate(self.scores):
for j, column in enumerate(self.scores[i]):
logger.debug(self.scores[i][j])
if (
j in cycle_path
and i not in cycle_path
and self.scores[i][j]
):
subtract_val = self.compute_max_subtract_score(j, cycle_path)
logger.debug('%s - %s', self.scores[i][j], subtract_val)
new_vals = []
for cur_val in self.scores[i][j]:
new_vals.append(cur_val - subtract_val)
self.scores[i][j] = new_vals
for i, row in enumerate(self.scores):
for j, cell in enumerate(self.scores[i]):
if i in cycle_path and j in cycle_path:
self.scores[i][j] = []
logger.debug('After update: %s', self.scores)
def compute_original_indexes(self, new_indexes):
"""
As nodes are collapsed into others, they are replaced
by the new node in the graph, but it's still necessary
to keep track of what these original nodes were. This
takes a list of node addresses and replaces any collapsed
node addresses with their original addresses.
:type new_indexes: A list of integers.
:param new_indexes: A list of node addresses to check for
subsumed nodes.
"""
swapped = True
while swapped:
originals = []
swapped = False
for new_index in new_indexes:
if new_index in self.inner_nodes:
for old_val in self.inner_nodes[new_index]:
if old_val not in originals:
originals.append(old_val)
swapped = True
else:
originals.append(new_index)
new_indexes = originals
return new_indexes
def compute_max_subtract_score(self, column_index, cycle_indexes):
"""
When updating scores the score of the highest-weighted incoming
arc is subtracted upon collapse. This returns the correct
amount to subtract from that edge.
:type column_index: integer.
        :param column_index: An index representing the column of incoming arcs
to a particular node being updated
:type cycle_indexes: A list of integers.
:param cycle_indexes: Only arcs from cycle nodes are considered. This
            is a list of such node addresses.
"""
max_score = -100000
for row_index in cycle_indexes:
for subtract_val in self.scores[row_index][column_index]:
if subtract_val > max_score:
max_score = subtract_val
return max_score
def best_incoming_arc(self, node_index):
"""
Returns the source of the best incoming arc to the
node with address: node_index
:type node_index: integer.
:param node_index: The address of the 'destination' node,
the node that is arced to.
"""
originals = self.compute_original_indexes([node_index])
logger.debug('originals: %s', originals)
max_arc = None
max_score = None
for row_index in range(len(self.scores)):
for col_index in range(len(self.scores[row_index])):
# print self.scores[row_index][col_index]
if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score):
max_score = self.scores[row_index][col_index]
max_arc = row_index
logger.debug('%s, %s', row_index, col_index)
logger.debug(max_score)
for key in self.inner_nodes:
replaced_nodes = self.inner_nodes[key]
if max_arc in replaced_nodes:
return key
return max_arc
def original_best_arc(self, node_index):
originals = self.compute_original_indexes([node_index])
max_arc = None
max_score = None
max_orig = None
for row_index in range(len(self.scores)):
for col_index in range(len(self.scores[row_index])):
if col_index in originals and (max_score is None or self.scores[row_index][col_index] > max_score):
max_score = self.scores[row_index][col_index]
max_arc = row_index
max_orig = col_index
return [max_arc, max_orig]
def parse(self, tokens, tags):
"""
Parses a list of tokens in accordance to the MST parsing algorithm
for non-projective dependency parses. Assumes that the tokens to
be parsed have already been tagged and those tags are provided. Various
scoring methods can be used by implementing the ``DependencyScorerI``
interface and passing it to the training algorithm.
:type tokens: list(str)
:param tokens: A list of words or punctuation to be parsed.
:type tags: list(str)
:param tags: A list of tags corresponding by index to the words in the tokens list.
:return: An iterator of non-projective parses.
:rtype: iter(DependencyGraph)
"""
self.inner_nodes = {}
# Initialize g_graph
g_graph = DependencyGraph()
for index, token in enumerate(tokens):
g_graph.nodes[index + 1].update(
{
'word': token,
'tag': tags[index],
'rel': 'NTOP',
'address': index + 1,
}
)
#print (g_graph.nodes)
# Fully connect non-root nodes in g_graph
g_graph.connect_graph()
original_graph = DependencyGraph()
for index, token in enumerate(tokens):
original_graph.nodes[index + 1].update(
{
'word': token,
'tag': tags[index],
'rel': 'NTOP',
'address': index+1,
}
)
b_graph = DependencyGraph()
c_graph = DependencyGraph()
for index, token in enumerate(tokens):
c_graph.nodes[index + 1].update(
{
'word': token,
'tag': tags[index],
'rel': 'NTOP',
'address': index + 1,
}
)
# Assign initial scores to g_graph edges
self.initialize_edge_scores(g_graph)
logger.debug(self.scores)
# Initialize a list of unvisited vertices (by node address)
unvisited_vertices = [
vertex['address'] for vertex in c_graph.nodes.values()
]
# Iterate over unvisited vertices
nr_vertices = len(tokens)
betas = {}
while unvisited_vertices:
# Mark current node as visited
current_vertex = unvisited_vertices.pop(0)
logger.debug('current_vertex: %s', current_vertex)
# Get corresponding node n_i to vertex v_i
current_node = g_graph.get_by_address(current_vertex)
logger.debug('current_node: %s', current_node)
# Get best in-edge node b for current node
best_in_edge = self.best_incoming_arc(current_vertex)
betas[current_vertex] = self.original_best_arc(current_vertex)
logger.debug('best in arc: %s --> %s', best_in_edge, current_vertex)
# b_graph = Union(b_graph, b)
for new_vertex in [current_vertex, best_in_edge]:
b_graph.nodes[new_vertex].update(
{
'word': 'TEMP',
'rel': 'NTOP',
'address': new_vertex,
}
)
b_graph.add_arc(best_in_edge, current_vertex)
# Beta(current node) = b - stored for parse recovery
# If b_graph contains a cycle, collapse it
cycle_path = b_graph.contains_cycle()
if cycle_path:
# Create a new node v_n+1 with address = len(nodes) + 1
new_node = {
'word': 'NONE',
'rel': 'NTOP',
'address': nr_vertices + 1,
}
# c_graph = Union(c_graph, v_n+1)
c_graph.add_node(new_node)
# Collapse all nodes in cycle C into v_n+1
self.update_edge_scores(new_node, cycle_path)
self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)
for cycle_index in cycle_path:
c_graph.add_arc(new_node['address'], cycle_index)
# self.replaced_by[cycle_index] = new_node['address']
self.inner_nodes[new_node['address']] = cycle_path
# Add v_n+1 to list of unvisited vertices
unvisited_vertices.insert(0, nr_vertices + 1)
# increment # of nodes counter
nr_vertices += 1
# Remove cycle nodes from b_graph; B = B - cycle c
for cycle_node_address in cycle_path:
b_graph.remove_by_address(cycle_node_address)
logger.debug('g_graph: %s', g_graph)
logger.debug('b_graph: %s', b_graph)
logger.debug('c_graph: %s', c_graph)
logger.debug('Betas: %s', betas)
logger.debug('replaced nodes %s', self.inner_nodes)
# Recover parse tree
logger.debug('Final scores: %s', self.scores)
logger.debug('Recovering parse...')
for i in range(len(tokens) + 1, nr_vertices + 1):
betas[betas[i][1]] = betas[i]
logger.debug('Betas: %s', betas)
for node in original_graph.nodes.values():
            # TODO: It's dangerous to assume that deps is a dictionary
# because it's a default dictionary. Ideally, here we should not
# be concerned how dependencies are stored inside of a dependency
# graph.
node['deps'] = {}
for i in range(1, len(tokens) + 1):
original_graph.add_arc(betas[i][0], betas[i][1])
logger.debug('Done.')
yield original_graph
#################################################################
# Rule-based Non-Projective Parser
#################################################################
class NonprojectiveDependencyParser(object):
"""
A non-projective, rule-based, dependency parser. This parser
will return the set of all possible non-projective parses based on
the word-to-word relations defined in the parser's dependency
grammar, and will allow the branches of the parse tree to cross
in order to capture a variety of linguistic phenomena that a
projective parser will not.
"""
def __init__(self, dependency_grammar):
"""
Creates a new ``NonprojectiveDependencyParser``.
:param dependency_grammar: a grammar of word-to-word relations.
:type dependency_grammar: DependencyGrammar
"""
self._grammar = dependency_grammar
def parse(self, tokens):
"""
Parses the input tokens with respect to the parser's grammar. Parsing
is accomplished by representing the search-space of possible parses as
a fully-connected directed graph. Arcs that would lead to ungrammatical
parses are removed and a lattice is constructed of length n, where n is
the number of input tokens, to represent all possible grammatical
traversals. All possible paths through the lattice are then enumerated
to produce the set of non-projective parses.
        :param tokens: A list of tokens to parse.
        :type tokens: list(str)
        :return: An iterator of non-projective parses.
        :rtype: iter(DependencyGraph)
"""
# Create graph representation of tokens
self._graph = DependencyGraph()
for index, token in enumerate(tokens):
self._graph.nodes[index] = {
'word': token,
'deps': [],
'rel': 'NTOP',
'address': index,
}
for head_node in self._graph.nodes.values():
deps = []
for dep_node in self._graph.nodes.values() :
if (
self._grammar.contains(head_node['word'], dep_node['word'])
and head_node['word'] != dep_node['word']
):
deps.append(dep_node['address'])
head_node['deps'] = deps
# Create lattice of possible heads
roots = []
possible_heads = []
for i, word in enumerate(tokens):
heads = []
for j, head in enumerate(tokens):
if (i != j) and self._grammar.contains(head, word):
heads.append(j)
if len(heads) == 0:
roots.append(i)
possible_heads.append(heads)
# Set roots to attempt
if len(roots) < 2:
if len(roots) == 0:
for i in range(len(tokens)):
roots.append(i)
# Traverse lattice
analyses = []
for root in roots:
stack = []
analysis = [[] for i in range(len(possible_heads))]
i = 0
forward = True
while i >= 0:
if forward:
if len(possible_heads[i]) == 1:
analysis[i] = possible_heads[i][0]
elif len(possible_heads[i]) == 0:
analysis[i] = -1
else:
head = possible_heads[i].pop()
analysis[i] = head
stack.append([i, head])
if not forward:
index_on_stack = False
for stack_item in stack:
if stack_item[0] == i:
index_on_stack = True
orig_length = len(possible_heads[i])
if index_on_stack and orig_length == 0:
for j in range(len(stack) - 1, -1, -1):
stack_item = stack[j]
if stack_item[0] == i:
possible_heads[i].append(stack.pop(j)[1])
elif index_on_stack and orig_length > 0:
head = possible_heads[i].pop()
analysis[i] = head
stack.append([i, head])
forward = True
if i + 1 == len(possible_heads):
analyses.append(analysis[:])
forward = False
if forward:
i += 1
else:
i -= 1
# Filter parses
        # ensure 1 root, everything has 1 head
for analysis in analyses:
if analysis.count(-1) > 1:
# there are several root elements!
continue
graph = DependencyGraph()
graph.root = graph.nodes[analysis.index(-1) + 1]
for address, (token, head_index) in enumerate(zip(tokens, analysis), start=1):
head_address = head_index + 1
node = graph.nodes[address]
node.update(
{
'word': token,
'address': address,
}
)
if head_address == 0:
rel = 'ROOT'
else:
rel = ''
graph.nodes[head_index + 1]['deps'][rel].append(address)
# TODO: check for cycles
yield graph
#################################################################
# Demos
#################################################################
def demo():
# hall_demo()
nonprojective_conll_parse_demo()
rule_based_demo()
def hall_demo():
npp = ProbabilisticNonprojectiveParser()
npp.train([], DemoScorer())
for parse_graph in npp.parse(['v1', 'v2', 'v3'], [None, None, None]):
print(parse_graph)
def nonprojective_conll_parse_demo():
from nltk.parse.dependencygraph import conll_data2
graphs = [
DependencyGraph(entry) for entry in conll_data2.split('\n\n') if entry
]
npp = ProbabilisticNonprojectiveParser()
npp.train(graphs, NaiveBayesDependencyScorer())
for parse_graph in npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']):
print(parse_graph)
def rule_based_demo():
from nltk.grammar import DependencyGrammar
grammar = DependencyGrammar.fromstring("""
'taught' -> 'play' | 'man'
'man' -> 'the' | 'in'
'in' -> 'corner'
'corner' -> 'the'
'play' -> 'golf' | 'dachshund' | 'to'
'dachshund' -> 'his'
""")
print(grammar)
ndp = NonprojectiveDependencyParser(grammar)
graphs = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
print('Graphs:')
for graph in graphs:
print(graph)
if __name__ == '__main__':
demo()
|
|
import collections
import os
import random
from artificialproject.random import weighted_choice
class GenerationFailedException(Exception):
pass
GeneratedField = collections.namedtuple('GeneratedField', [
'value',
'deps',
])
class NullableGenerator:
def __init__(self, value_generator):
self._value_generator = value_generator
self._null_values = collections.Counter()
def add_sample(self, base_path, sample):
if sample is None:
self._null_values.update([True])
else:
self._null_values.update([False])
self._value_generator.add_sample(base_path, sample)
def generate(self, base_path):
if weighted_choice(self._null_values):
return GeneratedField(None, [])
else:
return self._value_generator.generate(base_path)
class SingletonGenerator:
def __init__(self, set_generator):
self._set_generator = set_generator
def add_sample(self, base_path, sample):
self._set_generator.add_sample(base_path, [sample])
def generate(self, base_path):
field = self._set_generator.generate(base_path)
assert len(field.value) == 1, field
return GeneratedField(field.value[0], field.deps)
class EnumSetGenerator:
def __init__(self):
self._lengths = collections.Counter()
self._values = collections.Counter()
def add_sample(self, base_path, sample):
self._lengths.update([len(sample)])
self._values.update(sample)
def generate(self, base_path):
length = weighted_choice(self._lengths)
options = collections.Counter(self._values)
output = []
while len(output) < length:
value = weighted_choice(options)
output.append(value)
del options[value]
return GeneratedField(output, [])
class StringGenerator:
def __init__(self, respect_file_extensions=False):
self._respect_file_extensions = respect_file_extensions
self._lengths = collections.Counter()
self._first_chars = collections.Counter()
self._other_chars = collections.Counter()
if self._respect_file_extensions:
self._extensions = collections.Counter()
def add_sample(self, base_path, sample):
self.add_string_sample(sample)
def add_string_sample(self, sample):
if self._respect_file_extensions:
sample, extension = os.path.splitext(sample)
self._extensions.update([extension])
self._lengths.update([len(sample)])
if sample:
self._first_chars.update(sample[0])
for ch in sample[1:]:
self._other_chars.update(ch)
def generate(self, base_path):
return GeneratedField(self.generate_string(), [])
def generate_string(self):
length = weighted_choice(self._lengths)
output = ''
if length > 0:
output += weighted_choice(self._first_chars)
while len(output) < length:
output += weighted_choice(self._other_chars)
if self._respect_file_extensions:
output += weighted_choice(self._extensions)
return output
class VisibilityGenerator:
def add_sample(self, base_path, sample):
pass
def generate(self, base_path):
return GeneratedField(['PUBLIC'], [])
class BuildTargetSetGenerator:
class DynamicFilteredList:
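        # Lazily filters input_list through predicate, remembering how far it
        # has scanned so that repeated get_values() calls only evaluate the
        # elements appended since the previous call.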
def __init__(self, input_list, predicate):
self._input_list = input_list
self._predicate = predicate
self._output_list = []
self._processed = 0
def get_values(self):
input_len = len(self._input_list)
while self._processed < input_len:
value = self._input_list[self._processed]
if self._predicate(value):
self._output_list.append(value)
self._processed += 1
return self._output_list
def __init__(
self,
context,
process_output_extensions=False,
override_types=None):
self._context = context
self._process_output_extensions = process_output_extensions
self._lengths = collections.Counter()
self._types = collections.Counter()
self._unique_values_by_type_and_extension = collections.defaultdict(set)
self._unique_values_dirty = False
self._choice_probability_by_type_and_extension = dict()
self._accepted_targets_by_type = dict()
self._accepted_targets_with_output_by_type = dict()
if self._process_output_extensions:
self._output_extensions_by_type = collections.defaultdict(
collections.Counter)
if override_types is None:
self._override_types = {}
else:
self._override_types = dict(override_types)
def add_sample(self, base_path, sample):
self._lengths.update([len(sample)])
for target in sample:
target = target.split('#')[0]
if target.startswith(':'):
target = '//' + base_path + target
target_data = self._context.input_target_data[target]
target_type = target_data['buck.type']
target_type = self._override_types.get(target_type, target_type)
self._types.update([target_type])
extension = None
if self._process_output_extensions:
extension = self._get_output_extension(target_data)
self._output_extensions_by_type[target_type].update([extension])
self._unique_values_by_type_and_extension[
(target_type, extension)].add(target)
self._unique_values_dirty = True
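    # For every (target type, output extension) pair seen in the samples,
    # estimate what fraction of all such input targets was actually referenced;
    # generate() later uses this fraction as an acceptance probability when
    # sampling generated targets.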
def _update_choice_probability(self):
self._choice_probability_by_type_and_extension = dict()
for (type, extension), used_values in (
self._unique_values_by_type_and_extension.items()):
all_values = (x for x in self._context.input_target_data.values()
if x['buck.type'] == type)
if self._process_output_extensions:
all_values = (x for x in all_values
if self._get_output_extension(x) == extension)
num = len(used_values)
denom = sum(1 for x in all_values)
probability = float(num) / denom
key = (type, extension)
self._choice_probability_by_type_and_extension[key] = probability
def _is_accepted(self, target_name):
target_data = self._context.gen_target_data[target_name]
target_type = target_data['buck.type']
extension = None
if self._process_output_extensions:
extension = self._get_output_extension(target_data)
probability = self._choice_probability_by_type_and_extension.get(
(target_type, extension), 0)
return random.uniform(0, 1) < probability
def generate(self, base_path, force_length=None):
if self._unique_values_dirty:
self._update_choice_probability()
self._unique_values_dirty = False
if force_length is not None:
length = force_length
else:
length = weighted_choice(self._lengths)
type_extension_counts = collections.Counter()
for i in range(length):
type = weighted_choice(self._types)
if self._process_output_extensions:
extension = weighted_choice(
self._output_extensions_by_type[type])
else:
extension = None
type_extension_counts.update([(type, extension)])
output = []
if self._process_output_extensions:
all_targets_dict = self._context.gen_targets_with_output_by_type
accepted_targets_dict = self._accepted_targets_with_output_by_type
else:
all_targets_dict = self._context.gen_targets_by_type
accepted_targets_dict = self._accepted_targets_by_type
for (type, extension), count in type_extension_counts.items():
options = accepted_targets_dict.get(type)
if options is None:
options = self.DynamicFilteredList(
all_targets_dict[type],
lambda x: self._is_accepted(x))
accepted_targets_dict[type] = options
options = options.get_values()
if extension is not None:
options = [x for x in options
if self._get_output_extension(
self._context.gen_target_data[x]) == extension]
if count > len(options):
raise GenerationFailedException()
output.extend(random.sample(options, count))
return GeneratedField(output, output)
def _get_output_extension(self, target_data):
if 'out' not in target_data or target_data['out'] is None:
return None
extension = os.path.splitext(target_data['out'])[1]
if extension == '':
return None
return extension
class PathSetGenerator:
def __init__(self, context):
self._context = context
self._component_generator = StringGenerator()
self._lengths = collections.Counter()
self._component_counts = collections.Counter()
self._extensions = collections.Counter()
def add_sample(self, base_path, sample):
self._lengths.update([len(sample)])
for path in sample:
self._context.file_path_generator.add_package_file_sample(
base_path,
path)
components = []
while path:
path, component = os.path.split(path)
components.append(component)
self._component_counts.update([len(components)])
if not components:
self._extensions.update([''])
else:
components[0], extension = os.path.splitext(components[0])
self._extensions.update([extension])
for component in components:
self._component_generator.add_sample(base_path, component)
def generate(self, base_path, force_length=None):
if force_length is not None:
length = force_length
else:
length = weighted_choice(self._lengths)
extension = weighted_choice(self._extensions)
output = [self._generate_path(base_path, extension)
for i in range(length)]
return GeneratedField(output, [])
def _generate_path(self, base_path, extension):
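        # Ask the shared file_path_generator for a path inside this package and
        # create an empty placeholder file for it under the output repository,
        # so that the generated target's source paths actually exist on disk.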
component_count = weighted_choice(self._component_counts)
path = self._context.file_path_generator.generate_path_in_package(
base_path,
component_count,
self._component_generator,
extension)
full_path = os.path.join(
self._context.output_repository,
base_path,
path)
os.makedirs(os.path.dirname(full_path), exist_ok=True)
with open(full_path, 'w'):
pass
return path
class SourcePathSetGenerator:
def __init__(self, context):
self._build_target_set_generator = BuildTargetSetGenerator(
context, process_output_extensions=True)
self._path_set_generator = PathSetGenerator(context)
self._lengths = collections.Counter()
self._build_target_values = collections.Counter()
def add_sample(self, base_path, sample):
self._lengths.update([len(sample)])
for source_path in sample:
if source_path.startswith('//') or source_path.startswith(':'):
self._build_target_values.update([True])
self._build_target_set_generator.add_sample(
base_path, [source_path])
else:
self._build_target_values.update([False])
self._path_set_generator.add_sample(base_path, [source_path])
def generate(self, base_path):
length = weighted_choice(self._lengths)
build_target_count = 0
path_count = 0
for i in range(length):
if weighted_choice(self._build_target_values):
build_target_count += 1
else:
path_count += 1
build_targets = self._build_target_set_generator.generate(
base_path,
force_length=build_target_count)
paths = self._path_set_generator.generate(
base_path, force_length=path_count)
assert len(build_targets.value) == build_target_count, (
build_targets, build_target_count)
assert len(paths.value) == path_count, (paths, path_count)
return GeneratedField(
build_targets.value + paths.value,
build_targets.deps + paths.deps)
class SourcesWithFlagsGenerator:
def __init__(self, context):
self._source_path_set_generator = SourcePathSetGenerator(context)
self._flag_generator = StringGenerator()
self._flag_counts = collections.Counter()
def add_sample(self, base_path, sample):
source_paths = []
flag_lists = []
for source_with_flags in sample:
if isinstance(source_with_flags, list):
source_paths.append(source_with_flags[0])
flag_lists.append(source_with_flags[1])
else:
source_paths.append(source_with_flags)
flag_lists.append([])
self._source_path_set_generator.add_sample(base_path, source_paths)
for flags in flag_lists:
self._flag_counts.update([len(flags)])
for flag in flags:
self._flag_generator.add_sample(base_path, flag)
def generate(self, base_path):
source_paths = self._source_path_set_generator.generate(base_path)
output = [self._generate_source_with_flags(base_path, sp)
for sp in source_paths.value]
return GeneratedField(output, source_paths.deps)
def _generate_source_with_flags(self, base_path, source_path):
flag_count = weighted_choice(self._flag_counts)
if flag_count == 0:
return source_path
flags = [self._flag_generator.generate(base_path).value
for i in range(flag_count)]
return [source_path, flags]
|
|
#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from utils import utils, inspector, admin
# http://www.fmc.gov/bureaus_offices/office_of_inspector_general.aspx
archive = 2005
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - The link to http://www.fmc.gov/userfiles/pages/file/OR10-03_UserFeeCalculations.pdf
# is incorrect. See `REPORT_URL_MAPPING`
# - Fix all links in `BLACKLIST_REPORT_URLS`
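# Hypothetical invocation sketch (not part of this scraper): run() receives an
# options dict assembled by the shared runner from the standard flags mentioned
# above, e.g. a "since" year; inspector.year_range(options, archive) then yields
# the years to fetch, falling back to everything since `archive` (2005).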
AUDIT_REPORTS_URL = "http://www.fmc.gov/about/oig_audits_evaluations_and_reviews.aspx"
SEMIANNUAL_REPORTS_URL = "http://www.fmc.gov/about/oig_semiannual_reports.aspx"
REPORT_URL_MAPPING = {
"http://www.fmc.gov/userfiles/pages/file/OR10-03_UserFeeCalculations.pdf":
"http://www.fmc.gov/assets/1/Page/OR10-03_UserFeeCalculations.pdf",
}
BLACKLIST_REPORT_URLS = [
"http://www.fmc.gov/UserFiles/pages/File/FY_08_Audited_Financial_Statements_09-01.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Final_Report_09-01A.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Report_OR09-01.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Final_Report_A09-02.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Final_Report_A09-03.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Final_Report_A09-04.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Report_A09-05.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Final_Report_A09-06.pdf",
"http://www.fmc.gov/UserFiles/pages/File/OIG_Final_Report_A09-07.pdf",
]
REPORT_PUBLISHED_MAP = {
"AgencylettertoCongress": datetime.datetime(2016, 11, 28),
"508letterOIG2016": datetime.datetime(2016, 5, 27),
"A17-01": datetime.datetime(2016, 11, 9),
"A17-02": datetime.datetime(2016, 11, 8),
"A16-01": datetime.datetime(2015, 11, 10),
"A16-02": datetime.datetime(2015, 11, 12),
"A16-03": datetime.datetime(2016, 9, 9),
"A15-01": datetime.datetime(2014, 11, 14),
"A15-01A": datetime.datetime(2014, 12, 31),
"A15-02": datetime.datetime(2014, 11, 14),
"A15-03": datetime.datetime(2014, 11, 14),
"A15-04": datetime.datetime(2015, 3, 10),
"A15-05": datetime.datetime(2015, 9, 25),
"A14-01": datetime.datetime(2013, 12, 12),
"A14-01A": datetime.datetime(2014, 1, 31),
"A14-02": datetime.datetime(2014, 1, 3),
"A13-01": datetime.datetime(2012, 11, 6),
"A13-02": datetime.datetime(2012, 12, 6),
"A13-03": datetime.datetime(2012, 12, 21),
"A13-04": datetime.datetime(2012, 12, 14),
"A13-04A": datetime.datetime(2013, 3, 15),
"A12-01": datetime.datetime(2011, 11, 9),
"A12-02": datetime.datetime(2012, 1, 17),
"OR12-01": datetime.datetime(2012, 3, 2),
"A12-01A": datetime.datetime(2012, 3, 5),
"OR12-02": datetime.datetime(2012, 7, 17),
"OR11-02": datetime.datetime(2011, 9, 30),
"OR11-01": datetime.datetime(2011, 3, 16),
"A11-02A": datetime.datetime(2011, 1, 31),
"A11-02": datetime.datetime(2010, 11, 10),
"A11-01A": datetime.datetime(2010, 11, 8),
"A11-01": datetime.datetime(2010, 12, 15),
"OR10-04": datetime.datetime(2010, 8, 6),
"OR10-03": datetime.datetime(2010, 5, 27),
"OR10-02": datetime.datetime(2010, 5, 14),
"A10-01": datetime.datetime(2009, 11, 6),
"A10-01A": datetime.datetime(2010, 3, 2),
"OR10-01": datetime.datetime(2010, 3, 4),
"A10-02": datetime.datetime(2010, 1, 28),
"A10-03": datetime.datetime(2010, 3, 1),
"A09-01": datetime.datetime(2008, 11, 6),
"A09-01A": datetime.datetime(2009, 1, 15),
"OR09-01": datetime.datetime(2009, 1, 12),
"A09-02": datetime.datetime(2009, 2, 6),
"A09-03": datetime.datetime(2009, 7, 7),
"A09-04": datetime.datetime(2009, 6, 30),
"A09-05": datetime.datetime(2009, 7, 20),
"A09-06": datetime.datetime(2009, 7, 28),
"A09-07": datetime.datetime(2009, 8, 21),
"A08-01": datetime.datetime(2007, 11, 16),
"A08-02": datetime.datetime(2007, 11, 6),
"A08-02A": datetime.datetime(2007, 12, 12),
"A08-03": datetime.datetime(2008, 1, 23),
"A08-04": datetime.datetime(2008, 3, 18),
"A08-05": datetime.datetime(2008, 8, 29),
"A08-06": datetime.datetime(2008, 9, 10),
"A08-07": datetime.datetime(2008, 9, 22),
"A08-08": datetime.datetime(2008, 9, 22),
"A07-01": datetime.datetime(2006, 11, 13),
"OR07-01": datetime.datetime(2007, 1, 19),
"A07-02": datetime.datetime(2007, 5, 4),
"OR07-02": datetime.datetime(2007, 6, 29),
"A06-01": datetime.datetime(2006, 3, 30),
"OR06-01": datetime.datetime(2006, 8, 22),
"A06-02": datetime.datetime(2006, 8, 1),
"A06-04": datetime.datetime(2006, 10, 2),
}
def run(options):
year_range = inspector.year_range(options, archive)
# Pull the audit reports
doc = utils.beautifulsoup_from_url(AUDIT_REPORTS_URL)
results = doc.select("table tr")
if not results:
raise inspector.NoReportsFoundError("Federal Maritime Commission (audits)")
for result in results:
if result.th:
# Skip the header row
continue
report = report_from(result, AUDIT_REPORTS_URL, report_type='audit', year_range=year_range)
if report:
inspector.save_report(report)
# Pull historical audits
audit_year_links = doc.select("div.col-2-3 ul li a")
for year_link in audit_year_links:
audit_year_url = urljoin(AUDIT_REPORTS_URL, year_link.get('href'))
doc = utils.beautifulsoup_from_url(audit_year_url)
results = doc.select("table tr")
if not results:
# Grab results other than first and last (header and extra links)
results = doc.select("div.col-2-2 ul")[1:-1]
if not results:
raise inspector.NoReportsFoundError("Federal Maritime Commission (%s)" % audit_year_url)
for result in results:
if result.th:
# Skip the header row
continue
report = report_from(result, AUDIT_REPORTS_URL, report_type='audit', year_range=year_range)
if report:
inspector.save_report(report)
# Pull the semiannual reports
doc = utils.beautifulsoup_from_url(SEMIANNUAL_REPORTS_URL)
results = doc.select("div.col-2-2 p a") + doc.select("div.col-2-2 li a")
if not results:
raise inspector.NoReportsFoundError("Federal Maritime Commission (semiannual reports)")
for result in results:
report = report_from(result.parent, AUDIT_REPORTS_URL, report_type='semiannual_report', year_range=year_range)
if report:
inspector.save_report(report)
def report_from(result, landing_url, report_type, year_range):
link = result.find("a")
report_url = urljoin(landing_url, link.get('href'))
title = link.text
if report_url in REPORT_URL_MAPPING:
report_url = REPORT_URL_MAPPING[report_url]
if report_url in BLACKLIST_REPORT_URLS:
return
try:
report_id = result.select("td")[0].text
except IndexError:
try:
report_id = result.select("li")[0].text
except IndexError:
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
published_on = None
if report_id in REPORT_PUBLISHED_MAP:
published_on = REPORT_PUBLISHED_MAP[report_id]
if not published_on:
try:
published_on_text = title.split("-")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
except ValueError:
pass
if not published_on:
admin.log_no_date("fmc", report_id, title, report_url)
return
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'fmc',
'inspector_url': 'http://www.fmc.gov/bureaus_offices/office_of_inspector_general.aspx',
'agency': 'fmc',
'agency_name': 'Federal Maritime Commission',
'type': report_type,
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
|
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
from low import write_to_file
import string # string methods available
import re # regular expressions
import getopt           # command line argument handling
import math             # math functions
import time             # wall-clock timing used by the blast benchmarks below
from low import * # custom functions, written by myself
import tempfile # generate tmp files
from Bio import SeqIO # biopython stuff, to parse fasta files for instance
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path> [-m <path> -c <path>] [-s <path>]" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f path to the fasta file containing all contig and singleton sequences" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
file = ''
for key, value in keys:
if key == '-f': file = value
if file == '':
stderr( "sequence data file missing." )
show_help()
  elif not file_exists( file ):
    stderr( "invalid path in -f" )
show_help()
file = get_global_path( file )
return file
# =============================================================================
def get_sequences( fastafile, number ):
"""
gets the first <number> of sequences within the fasta file.
writes it to a file, returns the filename of this file.
"""
fh, tmpfilename = tempfile.mkstemp(dir='.')
fw = open( tmpfilename, 'w' )
handle = open(fastafile)
count = 0
for seq_record in SeqIO.parse(handle, "fasta"):
count += 1
if count > number: break
fw.write( '>' + seq_record.id + '\n' + seq_record.seq.tostring() + '\n' )
handle.close()
fw.flush()
fw.close()
return tmpfilename
def generate_datasets( file ):
datahash = {}
fo = open( file )
for line in fo:
alphabet, counter, path = line.split()
datahash[ alphabet+'-'+counter ] = path
fo.close()
seqhash = {}
# nucleotide
seqhash[ 'nt-1-1' ] = get_sequences(datahash[ 'nt-1' ], 500)
seqhash[ 'nt-2-1' ] = get_sequences(datahash[ 'nt-2' ], 50)
#seqhash[ 'nt-1-2' ] = get_sequences(datahash[ 'nt-1' ], 100)
#seqhash[ 'nt-2-2' ] = get_sequences(datahash[ 'nt-2' ], 250)
#seqhash[ 'nt-1-3' ] = get_sequences(datahash[ 'nt-1' ], 165)
#seqhash[ 'nt-2-3' ] = get_sequences(datahash[ 'nt-2' ], 165)
# amino acid
#seqhash[ 'aa-1-1' ] = get_sequences(datahash[ 'aa-1' ], 100)
#seqhash[ 'aa-2-1' ] = get_sequences(datahash[ 'aa-2' ], 100)
#seqhash[ 'aa-1-2' ] = get_sequences(datahash[ 'aa-1' ], 150)
#seqhash[ 'aa-2-2' ] = get_sequences(datahash[ 'aa-2' ], 150)
#seqhash[ 'aa-1-3' ] = get_sequences(datahash[ 'aa-1' ], 200)
#seqhash[ 'aa-2-3' ] = get_sequences(datahash[ 'aa-2' ], 200)
#seqhash[ 'aa-1-4' ] = get_sequences(datahash[ 'aa-1' ], 300)
#seqhash[ 'aa-2-4' ] = get_sequences(datahash[ 'aa-2' ], 300)
for key, path in seqhash.iteritems():
if key.startswith('nt'): t = 'n'
else: t = 'p'
os.system( "xdformat -" + t + " " + path + " &> xdformat.log")
os.system( "formatdb -i " + path )
return seqhash
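# A hypothetical example of the dataset list file parsed by generate_datasets
# above (not part of the original script): whitespace-separated columns
# <alphabet> <counter> <path>, which become the 'nt-1', 'nt-2', 'aa-1', ...
# keys looked up in datahash.
#
#   nt 1 data/rice.nt
#   nt 2 data/arath.nt
#   aa 1 data/rice.aa
#   aa 2 data/arath.aa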
# =============================================================================
def determine_blast_program( type1, type2 ):
"""
"""
if type1 == 'nt' and type2 == 'nt':
return 'tblastx'
elif type1 == 'aa' and type2 == 'aa':
return 'blastp'
elif type1 == 'aa' and type2 == 'nt':
return 'tblastn'
elif type1 == 'nt' and type2 == 'aa':
return 'blastx'
else:
return None
# =============================================================================
def benchmark_blastall( type1, path1, type2, path2 ):
"""
determines the runtime of dataset1 blasted against dataset2.
determines the type of blast to use depending on the file types (aa or nt).
"""
p = determine_blast_program( type1, type2 )
starttime = time.time()
os.system( "blastall -p " + p + " -d " + path2 + " -i " + path1 + " -o blastall.out" )
runtime = time.time() - starttime
print "benchmark blastall", type1, "vs", type2, "--", runtime
# =============================================================================
def benchmark_wublast( type1, path1, type2, path2 ):
"""
determines the runtime of dataset1 blasted against dataset2.
determines the type of blast to use depending on the file types (aa or nt).
wublast syntax: <program> <database> <query> [options...]
"""
p = determine_blast_program( type1, type2 )
starttime = time.time()
os.system( p + " " + path2 + " " + path1 + " &> wublast.out")
runtime = time.time() - starttime
sin,sout = os.popen2("grep \> -c " + path1)
sin.close()
s1 = sout.read().replace('\n','')
sout.close()
sin,sout = os.popen2("grep \> -c " + path2)
sin.close()
s2 = sout.read().replace('\n','')
sout.close()
print "benchmark wublast", s1, type1, "vs", s2, type2, "--", runtime
def xdformat( file, type ):
os.system( "xdformat -" + type + " " + file + " &> xdformat.log")
# =============================================================================
def bench_nt_vs_aa( seqhash ):
print "benchmark nt vs aa"
ricent = 'data/rice.nt'
riceaa = 'data/rice.aa'
arathnt = 'data/arath.nt'
arathaa = 'data/arath.aa'
rice_nt_100 = get_sequences(ricent, 100)
xdformat( rice_nt_100, 'n' )
arath_nt_100 = get_sequences(arathnt, 100)
xdformat( arath_nt_100, 'n' )
rice_nt_300 = get_sequences(ricent, 300)
xdformat( rice_nt_300, 'n' )
arath_nt_300 = get_sequences(arathnt, 300)
xdformat( arath_nt_300, 'n' )
rice_nt_500 = get_sequences(ricent, 500)
xdformat( rice_nt_500, 'n' )
arath_nt_500 = get_sequences(arathnt, 500)
xdformat( arath_nt_500, 'n' )
rice_aa_100 = get_sequences(riceaa, 100)
xdformat( rice_aa_100, 'p' )
arath_aa_100 = get_sequences(arathaa, 100)
xdformat( arath_aa_100, 'p' )
rice_aa_300 = get_sequences(riceaa, 300)
xdformat( rice_aa_300, 'p' )
arath_aa_300 = get_sequences(arathaa, 300)
xdformat( arath_aa_300, 'p' )
rice_aa_500 = get_sequences(riceaa, 500)
xdformat( rice_aa_500, 'p' )
arath_aa_500 = get_sequences(arathaa, 500)
xdformat( arath_aa_500, 'p' )
print "---"
print "TBLASTX"
benchmark_wublast( 'nt', rice_nt_100, 'nt', arath_nt_100 )
benchmark_wublast( 'nt', rice_nt_300, 'nt', arath_nt_300 )
benchmark_wublast( 'nt', rice_nt_500, 'nt', arath_nt_500 )
print "---"
print "BLASTX"
benchmark_wublast( 'nt', rice_nt_100, 'aa', arath_aa_100 )
benchmark_wublast( 'nt', rice_nt_300, 'aa', arath_aa_300 )
benchmark_wublast( 'nt', rice_nt_500, 'aa', arath_aa_500 )
print "---"
print "TBLASTN"
benchmark_wublast( 'aa', rice_aa_100, 'nt', arath_nt_100 )
benchmark_wublast( 'aa', rice_aa_300, 'nt', arath_nt_300 )
benchmark_wublast( 'aa', rice_aa_500, 'nt', arath_nt_500 )
print "---"
print "BLASTP"
benchmark_wublast( 'aa', rice_aa_100, 'aa', arath_aa_100 )
benchmark_wublast( 'aa', rice_aa_300, 'aa', arath_aa_300 )
benchmark_wublast( 'aa', rice_aa_500, 'aa', arath_aa_500 )
print "---"
# =============================================================================
def bench_sizes( seqhash ):
print "benchmark sizes"
ricent = 'data/rice.nt'
riceaa = 'data/rice.aa'
arathnt = 'data/arath.nt'
arathaa = 'data/arath.aa'
arath_aa_200 = get_sequences(arathaa, 200)
xdformat( arath_aa_200, 'p' )
rice_aa_10 = get_sequences(riceaa, 10)
xdformat( rice_aa_10, 'p' )
rice_aa_50 = get_sequences(riceaa, 50)
xdformat( rice_aa_50, 'p' )
rice_aa_200 = get_sequences(riceaa, 200)
xdformat( rice_aa_200, 'p' )
rice_aa_300 = get_sequences(riceaa, 300)
xdformat( rice_aa_300, 'p' )
rice_aa_500 = get_sequences(riceaa, 500)
xdformat( rice_aa_500, 'p' )
print "---"
benchmark_wublast( 'aa', rice_aa_10, 'aa', arath_aa_200 )
benchmark_wublast( 'aa', rice_aa_50, 'aa', arath_aa_200 )
benchmark_wublast( 'aa', rice_aa_200, 'aa', arath_aa_200 )
benchmark_wublast( 'aa', rice_aa_300, 'aa', arath_aa_200 )
benchmark_wublast( 'aa', rice_aa_500, 'aa', arath_aa_200 )
print "---"
benchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_10 )
benchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_50 )
benchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_200 )
benchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_300 )
benchmark_wublast( 'aa', arath_aa_200, 'aa', rice_aa_500 )
print "---"
# =============================================================================
def bench_single_vs_multiple_files( seqhash ):
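  # Compares many wublast runs (one per single-sequence query file) against a
  # single run with the equivalent multi-sequence query file, all searched
  # against the same 1000-sequence protein database.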
def single_files( file ):
count = 0
filenames = {}
handle = open(file)
for seq_record in SeqIO.parse(handle, "fasta") :
filenames[ file+str(count) ] = 1
write_to_file( file+str(count), seq_record.id + '\n' + seq_record.seq.tostring() + '\n' )
count += 1
handle.close()
return filenames
print "benchmark query files"
ricent = 'data/rice.nt'
riceaa = 'data/rice.aa'
arathnt = 'data/arath.nt'
arathaa = 'data/arath.aa'
rice_aa_10 = get_sequences(riceaa, 50)
rice_aa_50 = get_sequences(riceaa, 200)
rice_aa_100 = get_sequences(riceaa, 500)
arath_aa_1000 = get_sequences(arathaa, 1000)
xdformat( arath_aa_1000, 'p' )
print "---"
# split the files
p = 'blastp'
starttime = time.time()
filenames = single_files( rice_aa_10 )
for file in filenames.keys():
os.system( p + " " + arath_aa_1000 + " " + file + " &> wublast.out")
sys.stdout.write('.')
runtime = time.time() - starttime
sys.stdout.write('\n')
print "benchmark wublast", str(len(filenames.keys())), "--", runtime
for file in filenames.keys(): os.unlink(file)
starttime = time.time()
filenames = single_files( rice_aa_50 )
for file in filenames.keys():
os.system( p + " " + arath_aa_1000 + " " + file + " &> wublast.out")
sys.stdout.write('.')
runtime = time.time() - starttime
sys.stdout.write('\n')
print "benchmark wublast", str(len(filenames.keys())), "--", runtime
for file in filenames.keys(): os.unlink(file)
starttime = time.time()
filenames = single_files( rice_aa_100 )
for file in filenames.keys():
os.system( p + " " + arath_aa_1000 + " " + file + " &> wublast.out")
sys.stdout.write('.')
runtime = time.time() - starttime
sys.stdout.write('\n')
print "benchmark wublast", str(len(filenames.keys())), "--", runtime
for file in filenames.keys(): os.unlink(file)
print "---"
benchmark_wublast( 'aa', rice_aa_10, 'aa', arath_aa_1000 )
benchmark_wublast( 'aa', rice_aa_50, 'aa', arath_aa_1000 )
benchmark_wublast( 'aa', rice_aa_100, 'aa', arath_aa_1000 )
# =============================================================================
def remove_tmpfiles( seqhash ):
for key, value in seqhash.iteritems():
os.system( "rm " + value + "*" )
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main():
"""
"""
file = handle_arguments()
seqhash = generate_datasets( file )
#bench_nt_vs_aa( seqhash )
bench_sizes( seqhash )
#bench_single_vs_multiple_files( seqhash )
remove_tmpfiles( seqhash )
# =============================================================================
main()
|
|
#!/usr/bin/env python3.9
import os, sys, re, time, io, configparser, imaplib, html.parser, base64
import email.parser, email.header, email.utils
import tkinter, tkinter.font, tkinter.messagebox, PIL.Image, PIL.ImageTk
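# Renders the HTML body of a note into the tkinter Text widget, translating a
# small subset of tags (strike/b/i, block elements, inline style colors) into
# text-widget tags.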
class HTMLNoteParser(html.parser.HTMLParser):
style_tag = ""
style_num = 0
textField = None
def __init__(self, textField):
html.parser.HTMLParser.__init__(self)
self.textField = textField
def getColor(self, cssvalue):
rgba_re = re.compile(r"rgba?\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3})")
match = rgba_re.match(cssvalue)
if match is None:
return cssvalue
else:
return "#%02x%02x%02x" % tuple(map(int, match.group(1, 2, 3)))
def parseStyle(self, css):
style = {}
for setting in [x for x in css.split(";") if ":" in x]:
cssname, cssvalue = map(str.strip, setting.split(":", 1))
if cssname == "color":
style["foreground"] = self.getColor(cssvalue)
elif cssname == "background-color":
style["background"] = self.getColor(cssvalue)
self.textField.tag_configure(self.style_tag, style)
def handle_starttag(self, tag, attrs):
self.style_tag = "tag%d" % (self.style_num,)
self.style_num += 1
if tag == "strike":
font = tkinter.font.Font(overstrike=True)
self.textField.tag_configure(self.style_tag, font=font)
elif tag == "b":
font = tkinter.font.Font(weight="bold")
self.textField.tag_configure(self.style_tag, font=font)
elif tag == "i":
            font = tkinter.font.Font(slant="italic")
self.textField.tag_configure(self.style_tag, font=font)
elif tag in ("br", "div", "p", "h1", "h2", "h3", "h4"):
self.textField.insert(tkinter.END, "\n")
for name, value in attrs:
if name == "style":
self.parseStyle(value)
def handle_endtag(self, tag):
if tag in ("div", "p", "h1", "h2", "h3", "h4"):
self.textField.insert(tkinter.END, "\n")
def handle_startendtag(self, tag, attrs):
if tag in ("br", "div", "p", "h1", "h2", "h3", "h4"):
self.textField.insert(tkinter.END, "\n")
def handle_data(self, data):
previous_style_tag = self.style_tag
self.textField.insert(tkinter.END, data.replace("\r", ""),
self.style_tag)
self.style_tag = previous_style_tag
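# Recursively walk a (possibly multipart) MIME message and render each leaf
# part -- plain text, HTML or an inline image -- into the text widget.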
def displayMessage(message):
if message.is_multipart():
for part in message.get_payload():
displayMessage(part)
else:
contenttype = message.get_content_type().lower()
body = message.get_payload(decode=True)
if contenttype.startswith("text/plain"):
textField.insert(tkinter.END, body.decode().replace("\r", ""))
elif contenttype.startswith("text/html"):
HTMLNoteParser(textField).feed(body.decode())
elif contenttype.startswith("image/"):
img = PIL.ImageTk.PhotoImage(data=body)
textField.image_create(tkinter.END, image=img)
imgCache.append(img)
else:
textField.insert(tkinter.END, "<cannot display " + contenttype +
">")
def displayNote(*args):
    global noteChanged, imgCache
if noteChanged:
index = listBox.index(tkinter.ACTIVE)
subject = textField.get(1.0, 2.0).strip().encode("utf-8")
body = textField.get(1.0, tkinter.END).encode("utf-8")
note = notes[len(notes) - index - 1]
note["message"].set_payload(body)
note["subject"] = subject
note["changed"] = True
listBox.delete(index)
listBox.insert(index, subject)
noteChanged = False
index = listBox.curselection()
if len(index) > 0:
textField.delete(1.0, tkinter.END)
for tag in textField.tag_names():
textField.tag_delete(tag)
index = index[0]
message = notes[len(notes) - index - 1]["message"]
imgCache = []
displayMessage(message)
deleteButton.config(state=tkinter.NORMAL)
textField.edit_modified(False)
else:
deleteButton.config(state=tkinter.DISABLED)
def newNote():
subject = "new note"
message = email.message_from_string("")
notes.append({
"uid": None,
"message": message,
"subject": subject,
"changed": False,
})
listBox.insert(0, subject)
listBox.selection_clear(0, tkinter.END)
listBox.selection_set(0)
listBox.activate(0)
listBox.event_generate("<<ListboxSelect>>")
def deleteNote():
index = listBox.index(tkinter.ACTIVE)
if index >= 0:
rindex = len(notes) - index - 1
message = notes[rindex]
listBox.delete(index)
notes.remove(message)
deletedNotes.append(message)
deleteButton.config(state=tkinter.DISABLED)
def set_header(message, header, value, replace=True):
if not header in message.keys():
message[header] = value
elif replace:
message.replace_header(header, value)
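# Connection settings are read from ~/.imapnotes.ini ([connection] section with
# host, optional port, user and pass); notes live in the "Notes" mailbox.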
def imapConnect():
try:
config = configparser.ConfigParser()
config.read(os.path.expanduser("~/.imapnotes.ini"))
host = config.get("connection", "host")
if config.has_option("connection", "port"):
imap = imaplib.IMAP4_SSL(host, config.get("connection", "port"))
else:
imap = imaplib.IMAP4_SSL(host)
imap.login(config.get("connection", "user"),
config.get("connection", "pass"))
imap.select("Notes")
return imap
except Exception as e:
tkinter.messagebox.showerror("Connection failed",
"Cannot connect to IMAPS server:\n%s" % (str(e),))
sys.exit(1)
def imapNoop():
    global imap
    try:
        imap.noop()
    except Exception:
        # the connection dropped; reconnect and rebind the module-level handle
        imap = imapConnect()
def deleteUid(uid):
ret = imap.uid("COPY", uid, "Trash")
print("moved note to Trash:\nuid=%s\nret=%s\n" % (uid, ret))
ret = imap.uid("STORE", uid, "+FLAGS", "(\\Deleted)")
print("deleted note:\nuid=%s\nret=%s\n" % (uid, ret))
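# Changed notes are saved by APPENDing a fresh message with Apple-notes style
# headers; the previous revision (and any deleted note) is then removed by UID
# via deleteUid() before the mailbox is expunged and closed.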
def saveNotes(*args):
displayNote(args)
imapNoop()
for note in [x for x in notes if x["changed"]]:
message = note["message"]
subject = email.header.Header(note["subject"], "utf-8")
rfc_now = email.utils.formatdate(localtime=True)
set_header(message, "Subject", subject)
set_header(message, "Content-Type", "text/plain; charset=utf-8")
set_header(message, "Content-Transfer-Encoding", "8bit")
set_header(message, "X-Uniform-Type-Identifier", "com.apple.mail-note")
set_header(message, "Message-ID", email.utils.make_msgid())
set_header(message, "Date", email.utils.formatdate(localtime=True))
set_header(message, "X-Mail-Created-Date", rfc_now, False)
set_header(message, "X-Universally-Unique-Identifier",
email.utils.make_msgid().split("@")[0][1:], False)
now = imaplib.Time2Internaldate(time.time())
ret = imap.append("Notes", "", now, message.as_bytes())
print("changed note:\nsubject=%s\nret=%s\n" % (subject,ret))
uid = note["uid"]
if not uid is None:
deleteUid(uid)
for note in deletedNotes:
uid = note["uid"]
if not uid is None:
deleteUid(uid)
print("expunged mailbox=%s\n" % (imap.expunge(),))
print("closed mailbox=%s\n" % (imap.close(),))
root.destroy()
def textModified(*args):
global noteChanged
if textField.edit_modified():
noteChanged = True
imap = imapConnect()
notes = []
deletedNotes = []
# search returns tuple with list
notes_numbers = imap.uid("search", None, "ALL")[1][0].decode().replace(" ", ",")
# imap fetch expects comma separated list
if len(notes_numbers) > 0:
notes_list = imap.uid("fetch", notes_numbers, "RFC822")
uid_re = re.compile(r"UID\s+(\d+)")
for part in notes_list[1]:
# imap fetch returns s.th. like:
# ('OK', [('1 (UID 1 RFC822 {519}', 'From: ...'), ')'])
if part == b")":
continue
match = uid_re.search(part[0].decode())
uid = None if match is None else match.group(1)
message = email.message_from_bytes(part[1])
subject = ""
raw_subject = message.get("subject")
for substring, charset in email.header.decode_header(raw_subject):
if not charset is None:
substring = substring.decode(charset)
subject += substring
notes.append({
"uid": uid,
"message": message,
"subject": subject,
"changed": False,
})
gifdata = base64.b64decode("""
R0lGODlhQABAAKECAAAAAPHKGf///////yH5BAEKAAIALAAAAABAAEAAAAL+lH+gy+0PI0C0Jolz
tvzqDypdtwTmiabqyqLLyJXtTLcvXMn1vt84ouMJWb6fIThMnopGXegJWIqMHin0Y6VWTVdQVitA
KpPMn3gsLOPO6N5Uy27T1LC43Pam2u8r+sjZpfEFp2AVKDGoV8h1iJHYtMjH40cSKVlDGWN5OZNp
scfJOAEGGuqZsxnalwcZJdoI8WgWCYsoChZGWxt7S5qqmnJKUcopDPQLLLuGnBxgvNWs8nzEnDyd
6+q8+6Bcp7vd0P33DQ44Spgd7cI6m67ei/4ezL7s/n5NfIlfDbxvr+7PULlsAV8NFFeJ4EBzuPJJ
KigPnqJ/0SBGtCgP4z0pet4oNoO4kKEvhSERaiK50OQnfqo0AuQ4zqM1mAlDYmhoc8PInBE6FAAA
Ow==
""")
root = tkinter.Tk()
gificon = tkinter.PhotoImage(data=gifdata)
root.tk.call('wm', 'iconphoto', root._w, gificon)
root.title("imapnotes")
root.protocol("WM_DELETE_WINDOW", saveNotes)
root.after(42000, imapNoop)
frameButtons = tkinter.Frame(root)
frameButtons.pack(fill=tkinter.BOTH, expand=1)
frameListAndText = tkinter.Frame(frameButtons)
frameListAndText.pack(side=tkinter.LEFT)
newButton = tkinter.Button(frameListAndText, text="New", command=newNote)
newButton.pack(side=tkinter.TOP, fill=tkinter.X)
deleteButton = tkinter.Button(frameListAndText, text="Delete",
command=deleteNote, state=tkinter.DISABLED)
deleteButton.pack(side=tkinter.TOP, fill=tkinter.X)
panedWindow = tkinter.PanedWindow(frameButtons, orient=tkinter.HORIZONTAL)
panedWindow.pack(fill=tkinter.BOTH, expand=1)
listBox = tkinter.Listbox(panedWindow)
vscroll = tkinter.Scrollbar(listBox, command=listBox.yview,
orient=tkinter.VERTICAL)
vscroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
listBox.config(yscrollcommand=vscroll.set)
panedWindow.add(listBox, width=300, height=400)
for note in notes:
listBox.insert(0, note["subject"])
listBox.bind("<<ListboxSelect>>", displayNote)
textField = tkinter.Text(panedWindow, undo=True, wrap=tkinter.WORD)
vscroll = tkinter.Scrollbar(textField, command=textField.yview,
orient=tkinter.VERTICAL)
vscroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
textField.config(yscrollcommand=vscroll.set)
textField.bind("<<Modified>>", textModified)
panedWindow.add(textField, width=500)
noteChanged = False
imgCache = []
root.mainloop()
|
|
import os
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
matplotlib.rcParams['font.size'] = 16.0
import kid_readout.analysis.noise_archive
from kid_readout.analysis import khalil
default_settings = dict(fractional_f_0_err_limit = 1e-6,
fractional_Q_err_limit = 0.06,
valid_Q_range = (5000,1e6),
max_package_temp_deviation = None,
)
settings = dict(valid_load_temp_range = (4,8.0),
f_0_max_temp_limit = 5.0,
)
all_settings = dict()
all_settings.update(default_settings)
all_settings.update(settings)
def plot_s21(data_,ax=None,min_load_temp=4,max_load_temp=8, which_temp='sweep_primary_load_temperature'):
"""
Plot IQ resonance circles at different load temperatures
all sweeps will be plotted, so may want to down select data before
"""
if ax is None:
fig,ax = plt.subplots(figsize=(8,8))
data_ = data_.sort([which_temp])
for k,row in data_.iterrows():
s21 = row['sweep_normalized_s21']
load_temp = row[which_temp]
color = plt.cm.spectral((load_temp-min_load_temp+.1)/(max_load_temp-min_load_temp+.1))
s21m = row['sweep_model_normalized_s21']
ax.plot(s21m.real,s21m.imag,color=color)
ax.plot(s21.real,s21.imag,'.',color=color)
#lpf = row['low_pass_normalized_timeseries'].mean()
#ax.plot(lpf.real,lpf.imag,'x',mew=2,markersize=20,color=color)
s0 = row['normalized_model_s21_at_resonance']
ax.plot(s0.real,s0.imag,'+',mew=2,markersize=20,color='k')
ax.grid()
ax.set_xlim(0,1.1)
ax.set_ylim(-0.5,0.5)
def plot_load_vs_freq_shift(all_data,axs=None,min_load_temp=4,max_load_temp=8,anchor_range = (30,100)):
"""
Plot shift in resonance frequency as a function of load temperature
"""
if axs is None:
fig,(ax1,ax2,ax3,ax4) = plt.subplots(nrows=4,figsize=(8,12),sharex=True,squeeze=True)
else:
ax1,ax2,ax3,ax4 = axs
x = np.linspace(min_load_temp,max_load_temp,100)
slope = all_data.responsivity_Hz_per_K.iat[0]
offset = all_data.responsivity_offset.iat[0]
polynomial = np.array([slope,offset])
y = np.polyval(polynomial,x)
fractional = slope/(all_data.f_0_max.iat[0])
anchors = all_data[(all_data.timestream_duration>anchor_range[0]) &
(all_data.timestream_duration<anchor_range[1])]
for name,marker,color,data_ in zip(['in transition','steady'],
['.','o'],
['b','r'],
[all_data,anchors]):
load_temp = data_.sweep_primary_load_temperature #data_.timestream_primary_load_temperature.apply(np.mean)
ax1.errorbar(load_temp,data_.delta_f_0_Hz,yerr=data_.f_0_err*1e6,marker=marker,linestyle='none',label=name,color=color)
ax2.errorbar(load_temp,data_.delta_f_0_Hz-np.polyval(polynomial,data_.sweep_primary_load_temperature),
yerr=data_.f_0_err*1e6,linestyle='none',marker=marker,color=color,label=name)
ax3.plot(load_temp,1e6/data_.Q_i,marker,label=name,color=color)
ax4.plot(load_temp,1e6/data_.Q,marker,label=name,color=color)
ax1.plot(x,y,label=('%.0f+/-%.0f Hz/K\n %.1fppm/K' % (slope,all_data.responsivity_err.iat[0], fractional)))
ax2.set_ylim(-200,200)
ax1.legend(loc='lower left',prop=dict(size='small'))
ax1.grid()
ax1.set_xlabel('Load Temperature (K)')
ax2.grid()
ax2.set_ylabel('Residual (Hz)')
ax2.set_xlabel('Load Temperature (K)')
ax1.set_ylabel('Frequency Shift (Hz)')
ax3.grid()
ax3.set_xlabel('Load_Temperature(K)')
ax3.set_ylabel('$10^6/Q_i$')
ax4.grid()
ax4.set_xlabel('Load_Temperature(K)')
ax4.set_ylabel('$10^6/Q_r$')
def plot_load_vs_fractional_freq_shift(all_data,ax=None):
"""
Plot fractional frequency shift as a function of load temperature for all resonators
"""
if ax is None:
fig,ax = plt.subplots(figsize=(8,8))
for name, group in all_data.groupby('resonator_index'):
ax.plot(group.sweep_primary_load_temperature,group.fractional_delta_f_0,'.')
ax.grid()
ax.set_ylim(-2e-4,1e-5)
ax.set_ylabel('Fractional Frequency Shift')
ax.set_xlabel('Load Temperature (K)')
return fig
def plot_noise(data_,axs=None,min_load_temp=4,max_load_temp=8,max_uK=100):
"""
Plot noise spectra and noise levels vs load temperature
"""
if axs is None:
fig,(ax1,ax2) = plt.subplots(nrows=2,figsize=(8,8),squeeze=True)
else:
ax1,ax2 = axs
label1 = '150 Hz'
label2 = '15 kHz'
for k,row in data_.iterrows(): # .sort(['sweep_primary_load_temperature'])
load_temp = row['sweep_primary_load_temperature']
color = plt.cm.spectral((load_temp-min_load_temp+.1)/(max_load_temp-min_load_temp+.1))
freq = row['pca_freq']
net = row['net_uK_rootsec']
ax1.semilogx(freq,net[1,:],color=color,lw=2,label=('%.2f K' % load_temp))
noise_150 = net[1,:][(freq>100) & (freq<200)].mean()
noise_10k = net[1,:][(freq>1e4) & (freq<2e4)].mean()
ax2.plot(load_temp,noise_150,'o',color=color,mew=2,markersize=10,label=label1)
ax2.plot(load_temp,noise_10k,'d',color=color,mew=2,markersize=10,label=label2)
if label1:
label1 = None
label2 = None
ax1.grid()
ax1b = ax1.twinx()
ax1.set_xlim(10,2e4)
ax1.set_ylim(0,max_uK)
ax1b.set_ylim(0,max_uK*(1e-6*np.sqrt(2)*abs(data_.responsivity_Hz_per_K.iat[0])))
ax1b.grid(color='r')
ax1.axvspan(100,200,hatch='/',fc='none',alpha=0.2)
ax1.axvspan(1e4,2e4,hatch='/',fc='none',alpha=0.2)
ax1.set_xlabel('Hz')
ax1.set_ylabel(r'$\mu$K$\sqrt{s}$')
ax1b.set_ylabel(r'$Hz/\sqrt{Hz}$')
ax2.grid()
ax2b = ax2.twinx()
ax2.set_ylim(0,max_uK)
ax2b.set_ylim(0,max_uK*(1e-6*np.sqrt(2)*abs(data_.responsivity_Hz_per_K.iat[0])))
ax2b.grid(color='r')
ax2.legend(loc='lower right',prop=dict(size='small'))
ax2.set_ylabel(r'$\mu$K$\sqrt{s}$')
ax2b.set_ylabel(r'$Hz/\sqrt{Hz}$')
ax2.set_xlabel('Load Temperature (K)')
def plot_resonator_net(data_,resonator_index=0,fig = None,axs=None,anchor_range=(30,100),min_load_temp=4, max_load_temp=8,max_uK=70):
"""
Make complete plot for a given resonator (including S21 sweep, responsivity, Q's, and noise)
"""
if fig is None:
fig,axs = plt.subplots(ncols=4,nrows=2,figsize=(20,10))
fig.subplots_adjust(wspace=.3)
data_ = data_[data_.resonator_index == resonator_index]
anchors = data_[(data_.timestream_duration>anchor_range[0]) &
(data_.timestream_duration<anchor_range[1])]
plot_s21(anchors, ax=axs[1,0], min_load_temp=min_load_temp, max_load_temp=max_load_temp)
plot_load_vs_freq_shift(data_, axs=[axs[0,1],axs[1,1],axs[0,2],axs[1,2]], min_load_temp=min_load_temp, max_load_temp=max_load_temp,
anchor_range=anchor_range)
plot_noise(anchors, axs=[axs[0,3],axs[1,3]], min_load_temp=min_load_temp, max_load_temp=max_load_temp, max_uK=max_uK)
axs[0,0].set_visible(False)
info = data_.chip_name.iat[0].replace(', ','\n')
info += ('\nResonator: %d @ %.6f MHz' % (resonator_index,data_.f_0_max.iat[0]))
files = np.unique(data_.sweep_filename)
files.sort()
files = files.tolist()
median_temp = np.median(data_.sweep_primary_package_temperature)
temp_rms = np.std(data_.sweep_primary_package_temperature)
info += ('\nfirst file: %s' % files[0][:30])
info += ('\nlast file: %s' % files[-1][:30])
info += ('\nPackage Temperature: %.1f$\pm$%.1f mK' % (median_temp*1000,temp_rms*1000))
info += ('\nPower ~ %.1f dBm\n (%.1f dB atten, %.1f dB cold)' % (data_.power_dbm.iat[0],data_.atten.iat[0],data_.dac_chain_gain.iat[0]))
fig.text(0.1,0.9,info,ha='left',va='top',size='x-small',bbox=dict(facecolor='w',pad=8))
return fig
def plot_net_dataset(data_,pdfname=None,plotdir='/home/data/plots',**kwargs):
"""
Make PDF of plots for all resonators in a dataset.
"""
chip_name = data_.chip_name.iat[0]
chipfname = chip_name.replace(' ','_').replace(',','')
files = np.unique(data_.sweep_filename)
files.sort()
files = files.tolist()
first = os.path.splitext(os.path.split(files[0])[1])[0]
last = os.path.splitext(os.path.split(files[-1])[1])[0]
if pdfname is None:
pdfname = '/home/data/plots/net_summary_%s_%s_to_%s.pdf' % (chipfname,first,last)
pdf = PdfPages(pdfname)
try:
os.chmod(pdfname,0666)
except OSError:
print "could not change permissions of",pdfname
indexes = np.unique(data_.resonator_index)
for index in indexes:
try:
fig = plot_resonator_net(data_, resonator_index=index, **kwargs)
except Exception, e:
print index,e
continue
#title = ('%s\nResonator: %d @ %.6f MHz' % (chip_name,index,data_[data_.resonator_index==index].f_0_max.iat[0]))
#fig.suptitle(title)
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
pdf.savefig(fig,bbox_inches='tight')
fig = plot_load_vs_fractional_freq_shift(data_)
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
pdf.savefig(fig,bbox_inches='tight')
pdf.close()
def refine_dataset(original_data,settings):
"""
Refine a data set based on data cuts specified in the settings dictionary
"""
print len(original_data)
data_ = original_data[original_data.sweep_primary_load_temperature >= settings['valid_load_temp_range'][0]]
print len(data_)
data_ = data_[data_.sweep_primary_load_temperature <= settings['valid_load_temp_range'][1]]
print len(data_)
data_ = data_[data_.f_0_err/data_.f_0 < settings['fractional_f_0_err_limit']]
print len(data_)
data_ = data_[data_.Q_err/data_.Q < settings['fractional_Q_err_limit']]
print len(data_)
data_ = data_[data_.Q >= settings['valid_Q_range'][0]]
data_ = data_[data_.Q <= settings['valid_Q_range'][1]]
print len(data_)
data_.sweep_primary_load_temperature[data_.optical_load=='dark'] = .2
if settings['max_package_temp_deviation'] is not None:
median_temp = np.median(data_.sweep_primary_package_temperature)
temp_deviations = np.abs(data_.sweep_primary_package_temperature - median_temp)
data_ = data_[temp_deviations < settings['max_package_temp_deviation']]
print len(data_)
#data_ = data_.sort(["f_0"])
data_['f_0_max'] = np.zeros((data_.shape[0],))#data_.groupby("resonator_index")["f_0"].transform(lambda x: x.max())
data_['responsivity_Hz_per_K'] = np.zeros((data_.shape[0],))
data_['responsivity_err'] = np.zeros((data_.shape[0],))
data_['responsivity_offset'] = np.zeros((data_.shape[0],))
for index in np.unique(data_.resonator_index):
group = data_[data_.resonator_index == index]
max = group[group.sweep_primary_load_temperature < settings['f_0_max_temp_limit']].f_0.max()
data_.f_0_max[data_.resonator_index == index] = max
data_['delta_f_0_Hz'] = (data_.f_0-data_.f_0_max)*1e6
data_['fractional_delta_f_0'] = data_.delta_f_0_Hz/(1e6*data_.f_0_max)#(1e6*data_.noise_measurement_freq_MHz)
data_['Q_i_err'] = khalil.qi_error(Q = data_.Q, Q_err = data_.Q_err,
Q_e_real = data_.Q_e_real, Q_e_real_err = data_.Q_e_real_err,
Q_e_imag = data_.Q_e_imag, Q_e_imag_err = data_.Q_e_imag_err)
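    # Fit delta_f_0_Hz against load temperature (restricted to ~160 mK package
    # temperature and loads above 3 K) to get a per-resonator responsivity in Hz/K.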
for index in np.unique(data_.resonator_index):
group = data_[(data_.resonator_index == index)&(np.abs(data_.sweep_primary_package_temperature-0.16)<0.04)
&(data_.sweep_primary_load_temperature>3)]
try:
(slope,offset),cov = np.polyfit(group.sweep_primary_load_temperature,group.delta_f_0_Hz,1,cov=True)
print slope
data_.responsivity_Hz_per_K[data_.resonator_index == index] = slope
data_.responsivity_offset[data_.resonator_index == index] = offset
data_.responsivity_err[data_.resonator_index == index] = np.sqrt(cov[1,1])
except ValueError:
continue
except TypeError:
continue
except np.linalg.LinAlgError:
continue
eigvals_Hz = []
nets = []
for eigvals,freq,responsivity in zip(data_.pca_eigvals,data_.noise_measurement_freq_MHz,data_.responsivity_Hz_per_K):
# Convert eigvals spectra from 1/Hz units to Hz/sqrt(Hz)
spectrum_Hz = np.sqrt(eigvals)*freq*1e6
eigvals_Hz.append(spectrum_Hz)
# Calculate net in muK sqrt(s). In the following, 1e6 is K -> uK factor, and sqrt(2) is 1/sqrt(Hz) -> sqrt(s) factor
net = (1e6*spectrum_Hz/abs(responsivity))/np.sqrt(2)
nets.append(net)
data_['pca_eigvals_Hz_per_rootHz'] = eigvals_Hz
data_['net_uK_rootsec'] = nets
return data_
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions raised by the Horizon code and the machinery for handling them.
"""
import logging
import os
import sys
from django.contrib.auth import logout # noqa
from django.core.management import color_style # noqa
from django.http import HttpRequest # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.debug import CLEANSED_SUBSTITUTE # noqa
from django.views.debug import SafeExceptionReporterFilter # noqa
from horizon.conf import HORIZON_CONFIG # noqa
from horizon import messages
LOG = logging.getLogger(__name__)
class HorizonReporterFilter(SafeExceptionReporterFilter):
"""Error report filter that's always active, even in DEBUG mode."""
def is_active(self, request):
return True
# TODO(gabriel): This bugfix is cribbed from Django's code. When 1.4.1
# is available we can remove this code.
def get_traceback_frame_variables(self, request, tb_frame):
"""Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper'
in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper,
'sensitive_variables',
None)
break
current_frame = current_frame.f_back
cleansed = []
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed.append((name, CLEANSED_SUBSTITUTE))
return cleansed
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
else:
# Potentially cleanse only the request if it's one of the
# frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
class HorizonException(Exception):
"""Base exception class for distinguishing our own exception classes."""
pass
class Http302(HorizonException):
"""Error class which can be raised from within a handler to cause an
early bailout and redirect at the middleware level.
"""
status_code = 302
def __init__(self, location, message=None):
self.location = location
self.message = message
class NotAuthorized(HorizonException):
"""Raised whenever a user attempts to access a resource which they do not
have permission-based access to (such as when failing the
:func:`~horizon.decorators.require_perms` decorator).
The included :class:`~horizon.middleware.HorizonMiddleware` catches
``NotAuthorized`` and handles it gracefully by displaying an error
message and redirecting the user to a login page.
"""
status_code = 401
class NotAuthenticated(HorizonException):
"""Raised when a user is trying to make requests and they are not logged
in.
The included :class:`~horizon.middleware.HorizonMiddleware` catches
``NotAuthenticated`` and handles it gracefully by displaying an error
message and redirecting the user to a login page.
"""
status_code = 403
class NotFound(HorizonException):
"""Generic error to replace all "Not Found"-type API errors."""
status_code = 404
class Conflict(HorizonException):
"""Generic error to replace all "Conflict"-type API errors."""
status_code = 409
class RecoverableError(HorizonException):
"""Generic error to replace any "Recoverable"-type API errors."""
status_code = 100 # HTTP status code "Continue"
class ServiceCatalogException(HorizonException):
"""Raised when a requested service is not available in the
``ServiceCatalog`` returned by Keystone.
"""
def __init__(self, service_name):
message = 'Invalid service catalog service: %s' % service_name
super(ServiceCatalogException, self).__init__(message)
class AlreadyExists(HorizonException):
"""Exception to be raised when trying to create an API resource which
already exists.
"""
def __init__(self, name, resource_type):
self.attrs = {"name": name, "resource": resource_type}
self.msg = 'A %(resource)s with the name "%(name)s" already exists.'
def __repr__(self):
return self.msg % self.attrs
def __str__(self):
return self.msg % self.attrs
def __unicode__(self):
return _(self.msg) % self.attrs
class WorkflowError(HorizonException):
"""Exception to be raised when something goes wrong in a workflow."""
pass
class WorkflowValidationError(HorizonException):
"""Exception raised during workflow validation if required data is missing,
or existing data is not valid.
"""
pass
class HandledException(HorizonException):
"""Used internally to track exceptions that have gone through
:func:`horizon.exceptions.handle` more than once.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
UNAUTHORIZED = tuple(HORIZON_CONFIG['exceptions']['unauthorized'])
NOT_FOUND = tuple(HORIZON_CONFIG['exceptions']['not_found'])
RECOVERABLE = (AlreadyExists, Conflict,)
RECOVERABLE += tuple(HORIZON_CONFIG['exceptions']['recoverable'])
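# handle() below matches exceptions against these tuples with issubclass(), so
# the HORIZON_CONFIG entries must be exception classes.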
def error_color(msg):
return color_style().ERROR_OUTPUT(msg)
def check_message(keywords, message):
"""Checks an exception for given keywords and raises a new ``ActionError``
with the desired message if the keywords are found. This allows selective
control over API error messages.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
if set(str(exc_value).split(" ")).issuperset(set(keywords)):
exc_value._safe_message = message
raise
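# Illustrative call site (the API call and message are hypothetical): handle()
# must run inside an ``except`` block so that sys.exc_info() still refers to
# the active exception, e.g.::
#
#     try:
#         volumes = api.cinder.volume_list(request)
#     except Exception:
#         volumes = []
#         exceptions.handle(request, _("Unable to retrieve volumes."))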
def handle(request, message=None, redirect=None, ignore=False,
escalate=False, log_level=None, force_log=None):
"""Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
    #. ``RECOVERABLE``: Generic API errors which generate a user-facing message
but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
log_method = getattr(LOG, log_level or "exception")
force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
force_silence = getattr(exc_value, "silence_logging", False)
# Because the same exception may travel through this method more than
# once (if it's re-raised) we may want to treat it differently
# the second time (e.g. no user messages/logging).
handled = issubclass(exc_type, HandledException)
wrap = False
# Restore our original exception information, but re-wrap it at the end
if handled:
exc_type, exc_value, exc_traceback = exc_value.wrapped
wrap = True
# We trust messages from our own exceptions
if issubclass(exc_type, HorizonException):
message = exc_value
# Check for an override message
elif getattr(exc_value, "_safe_message", None):
message = exc_value._safe_message
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
message = message % {"exc": exc_value}
if issubclass(exc_type, UNAUTHORIZED):
if ignore:
return NotAuthorized
if not force_silence and not handled:
log_method(error_color("Unauthorized: %s" % exc_value))
if not handled:
if message:
message = _("Unauthorized: %s") % message
# We get some pretty useless error messages back from
# some clients, so let's define our own fallback.
fallback = _("Unauthorized. Please try logging in again.")
messages.error(request, message or fallback)
# Escalation means logging the user out and raising NotAuthorized
# so the middleware will redirect them appropriately.
if escalate:
logout(request)
raise NotAuthorized
# Otherwise continue and present our "unauthorized" error message.
return NotAuthorized
if issubclass(exc_type, NOT_FOUND):
wrap = True
if not force_silence and not handled and (not ignore or force_log):
log_method(error_color("Not Found: %s" % exc_value))
if not ignore and not handled:
messages.error(request, message or exc_value)
if redirect:
raise Http302(redirect)
if not escalate:
return NotFound # return to normal code flow
if issubclass(exc_type, RECOVERABLE):
wrap = True
if not force_silence and not handled and (not ignore or force_log):
# Default recoverable error to WARN log level
log_method = getattr(LOG, log_level or "warning")
log_method(error_color("Recoverable error: %s" % exc_value))
if not ignore and not handled:
messages.error(request, message or exc_value)
if redirect:
raise Http302(redirect)
if not escalate:
return RecoverableError # return to normal code flow
# If we've gotten here, time to wrap and/or raise our exception.
if wrap:
raise HandledException([exc_type, exc_value, exc_traceback])
raise exc_type, exc_value, exc_traceback
|
|
from collections import OrderedDict
from enum import IntEnum
from math import log, exp
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from . import base
from .items import GameItem, ItemQuantity
class Monster(models.Model, base.Elements, base.Stars, base.Archetype):
AWAKEN_LEVEL_INCOMPLETE = -1 # Japan fusion
AWAKEN_LEVEL_UNAWAKENED = 0
AWAKEN_LEVEL_AWAKENED = 1
AWAKEN_LEVEL_SECOND = 2
AWAKEN_CHOICES = (
(AWAKEN_LEVEL_UNAWAKENED, 'Unawakened'),
(AWAKEN_LEVEL_AWAKENED, 'Awakened'),
(AWAKEN_LEVEL_SECOND, 'Second Awakening'),
(AWAKEN_LEVEL_INCOMPLETE, 'Incomplete'),
)
COM2US_AWAKEN_MAP = {
-1: AWAKEN_LEVEL_INCOMPLETE,
0: AWAKEN_LEVEL_UNAWAKENED,
1: AWAKEN_LEVEL_AWAKENED,
2: AWAKEN_LEVEL_SECOND,
}
name = models.CharField(max_length=40)
com2us_id = models.IntegerField(blank=True, null=True, help_text='ID given in game data files')
family_id = models.IntegerField(blank=True, null=True, help_text='Identifier that matches same family monsters')
    skill_group_id = models.IntegerField(blank=True, null=True, help_text='Identifier that matches same skillup monsters (i.e. Street Fighter monsters with C2U counterparts)')
image_filename = models.CharField(max_length=250, null=True, blank=True)
element = models.CharField(max_length=6, choices=base.Elements.ELEMENT_CHOICES, default=base.Elements.ELEMENT_FIRE)
archetype = models.CharField(max_length=10, choices=base.Archetype.ARCHETYPE_CHOICES, default=base.Archetype.ARCHETYPE_ATTACK)
base_stars = models.IntegerField(choices=base.Stars.STAR_CHOICES, help_text='Display stars in game')
natural_stars = models.IntegerField(choices=base.Stars.STAR_CHOICES, help_text="Stars of the monster's lowest awakened form")
obtainable = models.BooleanField(default=True, help_text='Is available for players to acquire')
can_awaken = models.BooleanField(default=True, help_text='Has an awakened form')
is_awakened = models.BooleanField(default=False, help_text='Is the awakened form')
awaken_level = models.IntegerField(default=AWAKEN_LEVEL_UNAWAKENED, choices=AWAKEN_CHOICES, help_text='Awakening level')
awaken_bonus = models.TextField(blank=True, help_text='Bonus given upon awakening')
awakens_to = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name='+',
help_text='Awakened form of this monster'
)
awakens_from = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name='+',
help_text='Unawakened form of this monster'
)
awaken_cost = models.ManyToManyField(GameItem, through='AwakenCost', related_name='+')
skills = models.ManyToManyField('Skill', blank=True)
skill_ups_to_max = models.IntegerField(null=True, blank=True, help_text='Number of skill-ups required to max all skills')
leader_skill = models.ForeignKey('LeaderSkill', on_delete=models.SET_NULL, null=True, blank=True)
# 1-star lvl 1 values from data source
raw_hp = models.IntegerField(null=True, blank=True, help_text='HP value from game data files')
raw_attack = models.IntegerField(null=True, blank=True, help_text='ATK value from game data files')
raw_defense = models.IntegerField(null=True, blank=True, help_text='DEF value from game data files')
# Base-star lvl MAX values as seen in-game
    base_hp = models.IntegerField(null=True, blank=True, help_text='HP at base_stars lvl MAX')
    base_attack = models.IntegerField(null=True, blank=True, help_text='ATK at base_stars lvl MAX')
    base_defense = models.IntegerField(null=True, blank=True, help_text='DEF at base_stars lvl MAX')
# 6-star lvl MAX values
max_lvl_hp = models.IntegerField(null=True, blank=True, help_text='HP at 6-stars lvl 40')
max_lvl_attack = models.IntegerField(null=True, blank=True, help_text='ATK at 6-stars lvl 40')
max_lvl_defense = models.IntegerField(null=True, blank=True, help_text='DEF at 6-stars lvl 40')
speed = models.IntegerField(null=True, blank=True)
crit_rate = models.IntegerField(null=True, blank=True)
crit_damage = models.IntegerField(null=True, blank=True)
resistance = models.IntegerField(null=True, blank=True)
accuracy = models.IntegerField(null=True, blank=True)
# Homunculus monster fields
homunculus = models.BooleanField(default=False)
craft_materials = models.ManyToManyField(GameItem, through='MonsterCraftCost', related_name='+')
craft_cost = models.IntegerField(null=True, blank=True, help_text='Mana cost to craft this monster')
transforms_to = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name='transforms_from',
help_text='Monster which this monster can transform into during battle'
)
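    # Awakening essence requirements, broken down by element and grade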
awaken_mats_fire_low = models.IntegerField(blank=True, default=0)
awaken_mats_fire_mid = models.IntegerField(blank=True, default=0)
awaken_mats_fire_high = models.IntegerField(blank=True, default=0)
awaken_mats_water_low = models.IntegerField(blank=True, default=0)
awaken_mats_water_mid = models.IntegerField(blank=True, default=0)
awaken_mats_water_high = models.IntegerField(blank=True, default=0)
awaken_mats_wind_low = models.IntegerField(blank=True, default=0)
awaken_mats_wind_mid = models.IntegerField(blank=True, default=0)
awaken_mats_wind_high = models.IntegerField(blank=True, default=0)
awaken_mats_light_low = models.IntegerField(blank=True, default=0)
awaken_mats_light_mid = models.IntegerField(blank=True, default=0)
awaken_mats_light_high = models.IntegerField(blank=True, default=0)
awaken_mats_dark_low = models.IntegerField(blank=True, default=0)
awaken_mats_dark_mid = models.IntegerField(blank=True, default=0)
awaken_mats_dark_high = models.IntegerField(blank=True, default=0)
awaken_mats_magic_low = models.IntegerField(blank=True, default=0)
awaken_mats_magic_mid = models.IntegerField(blank=True, default=0)
awaken_mats_magic_high = models.IntegerField(blank=True, default=0)
source = models.ManyToManyField('Source', blank=True, help_text='Where this monster can be acquired from')
farmable = models.BooleanField(default=False, help_text='Monster can be acquired easily without luck')
fusion_food = models.BooleanField(default=False, help_text='Monster is used as a fusion ingredient')
bestiary_slug = models.SlugField(max_length=255, editable=False, null=True)
def image_url(self):
if self.image_filename:
return mark_safe('<img src="%s" height="42" width="42" loading="lazy" />' % static('herders/images/monsters/' + self.image_filename))
else:
return 'No Image'
def max_level_from_stars(self, stars=None):
if stars:
return 10 + stars * 5
else:
return 10 + self.base_stars * 5
def get_stats(self, grade, level):
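        # HP is scaled by a factor of 15 relative to the raw game-file value,
        # mirroring actual_hp() and save() below; the other stats are used as-is.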
all_stats = {
base.Stats.STAT_HP: self._calculate_actual_stat(self.raw_hp, grade, level) * 15,
base.Stats.STAT_DEF: self._calculate_actual_stat(self.raw_defense, grade, level),
base.Stats.STAT_ATK: self._calculate_actual_stat(self.raw_attack, grade, level),
base.Stats.STAT_SPD: self.speed,
base.Stats.STAT_CRIT_RATE_PCT: self.crit_rate,
base.Stats.STAT_CRIT_DMG_PCT: self.crit_damage,
base.Stats.STAT_RESIST_PCT: self.resistance,
base.Stats.STAT_ACCURACY_PCT: self.accuracy,
}
return all_stats
def get_stats_for_all_stars(self):
start_grade = self.base_stars
stats_list = OrderedDict()
if self.is_awakened and self.base_stars > 1:
start_grade -= 1
for grade in range(start_grade, 7):
max_level = self.max_level_from_stars(grade)
# Add the actual calculated stats
stats_list[str(grade)] = {
'HP': self.actual_hp(grade, max_level),
'ATK': self.actual_attack(grade, max_level),
'DEF': self.actual_defense(grade, max_level),
}
return stats_list
def actual_hp(self, grade, level):
# Check that base stat exists first
if not self.raw_hp:
return None
else:
return self._calculate_actual_stat(self.raw_hp, grade, level) * 15
    def actual_attack(self, grade=None, level=1):
        # a class-level Field object cannot serve as a default; fall back to
        # this monster's base_stars at call time
        if grade is None:
            grade = self.base_stars
# Check that base stat exists first
if not self.raw_attack:
return None
else:
return self._calculate_actual_stat(self.raw_attack, grade, level)
    def actual_defense(self, grade=None, level=1):
        if grade is None:
            grade = self.base_stars
# Check that base stat exists first
if not self.raw_defense:
return None
else:
return self._calculate_actual_stat(self.raw_defense, grade, level)
@staticmethod
def _calculate_actual_stat(stat, grade, level):
if stat is None:
return None
# Magic multipliers taken from summoner's war wikia calculator. Used to calculate stats for lvl 1 and lvl MAX
magic_multipliers = [
{'1': 1.0, 'max': 1.9958},
{'1': 1.9958, 'max': 3.03050646},
{'1': 3.03050646, 'max': 4.364426603},
{'1': 4.364426603, 'max': 5.941390935},
{'1': 5.941390935, 'max': 8.072330795},
{'1': 8.072330795, 'max': 10.97901633},
]
max_lvl = 10 + grade * 5
stat_lvl_1 = round(stat * magic_multipliers[grade - 1]['1'], 0)
stat_lvl_max = round(stat * magic_multipliers[grade - 1]['max'], 0)
if level == 1:
return int(stat_lvl_1)
elif level == max_lvl:
return int(stat_lvl_max)
else:
# Use exponential function in format value=ae^(bx)
# a=stat_lvl_1*e^(-b)
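            # Pinning value(1) == stat_lvl_1 and value(max_lvl) == stat_lvl_max gives
            #   b = ln(stat_lvl_max / stat_lvl_1) / (max_lvl - 1)  and  a = stat_lvl_1 * e^(-b)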
b_coeff = log(stat_lvl_max / stat_lvl_1) / (max_lvl - 1)
return int(round((stat_lvl_1 * exp(-b_coeff)) * exp(b_coeff * level)))
@property
def base_monster(self):
if self.awakens_from is not None and self.awakens_from.obtainable:
return self.awakens_from.base_monster
return self
def monster_family(self):
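        # Show obtainable monsters plus battle-transformation targets, but hide
        # unawakened forms whose awakened counterpart is reached via awakens_to.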
should_be_shown = Q(obtainable=True) | Q(transforms_to__isnull=False)
has_awakened_version = Q(can_awaken=True) & Q(awakens_to__isnull=False)
return Monster.objects.filter(should_be_shown, family_id=self.family_id).exclude(has_awakened_version).order_by('com2us_id')
def get_awakening_materials(self):
mats = OrderedDict()
mats['magic'] = OrderedDict()
mats['magic']['low'] = self.awaken_mats_magic_low
mats['magic']['mid'] = self.awaken_mats_magic_mid
mats['magic']['high'] = self.awaken_mats_magic_high
mats['fire'] = OrderedDict()
mats['fire']['low'] = self.awaken_mats_fire_low
mats['fire']['mid'] = self.awaken_mats_fire_mid
mats['fire']['high'] = self.awaken_mats_fire_high
mats['water'] = OrderedDict()
mats['water']['low'] = self.awaken_mats_water_low
mats['water']['mid'] = self.awaken_mats_water_mid
mats['water']['high'] = self.awaken_mats_water_high
mats['wind'] = OrderedDict()
mats['wind']['low'] = self.awaken_mats_wind_low
mats['wind']['mid'] = self.awaken_mats_wind_mid
mats['wind']['high'] = self.awaken_mats_wind_high
mats['light'] = OrderedDict()
mats['light']['low'] = self.awaken_mats_light_low
mats['light']['mid'] = self.awaken_mats_light_mid
mats['light']['high'] = self.awaken_mats_light_high
mats['dark'] = OrderedDict()
mats['dark']['low'] = self.awaken_mats_dark_low
mats['dark']['mid'] = self.awaken_mats_dark_mid
mats['dark']['high'] = self.awaken_mats_dark_high
return mats
def clean(self):
# Update null values
if self.awaken_mats_fire_high is None:
self.awaken_mats_fire_high = 0
if self.awaken_mats_fire_mid is None:
self.awaken_mats_fire_mid = 0
if self.awaken_mats_fire_low is None:
self.awaken_mats_fire_low = 0
if self.awaken_mats_water_high is None:
self.awaken_mats_water_high = 0
if self.awaken_mats_water_mid is None:
self.awaken_mats_water_mid = 0
if self.awaken_mats_water_low is None:
self.awaken_mats_water_low = 0
if self.awaken_mats_wind_high is None:
self.awaken_mats_wind_high = 0
if self.awaken_mats_wind_mid is None:
self.awaken_mats_wind_mid = 0
if self.awaken_mats_wind_low is None:
self.awaken_mats_wind_low = 0
if self.awaken_mats_light_high is None:
self.awaken_mats_light_high = 0
if self.awaken_mats_light_mid is None:
self.awaken_mats_light_mid = 0
if self.awaken_mats_light_low is None:
self.awaken_mats_light_low = 0
if self.awaken_mats_dark_high is None:
self.awaken_mats_dark_high = 0
if self.awaken_mats_dark_mid is None:
self.awaken_mats_dark_mid = 0
if self.awaken_mats_dark_low is None:
self.awaken_mats_dark_low = 0
if self.awaken_mats_magic_high is None:
self.awaken_mats_magic_high = 0
if self.awaken_mats_magic_mid is None:
self.awaken_mats_magic_mid = 0
if self.awaken_mats_magic_low is None:
self.awaken_mats_magic_low = 0
super(Monster, self).clean()
def save(self, *args, **kwargs):
# Update null values
if self.awaken_mats_fire_high is None:
self.awaken_mats_fire_high = 0
if self.awaken_mats_fire_mid is None:
self.awaken_mats_fire_mid = 0
if self.awaken_mats_fire_low is None:
self.awaken_mats_fire_low = 0
if self.awaken_mats_water_high is None:
self.awaken_mats_water_high = 0
if self.awaken_mats_water_mid is None:
self.awaken_mats_water_mid = 0
if self.awaken_mats_water_low is None:
self.awaken_mats_water_low = 0
if self.awaken_mats_wind_high is None:
self.awaken_mats_wind_high = 0
if self.awaken_mats_wind_mid is None:
self.awaken_mats_wind_mid = 0
if self.awaken_mats_wind_low is None:
self.awaken_mats_wind_low = 0
if self.awaken_mats_light_high is None:
self.awaken_mats_light_high = 0
if self.awaken_mats_light_mid is None:
self.awaken_mats_light_mid = 0
if self.awaken_mats_light_low is None:
self.awaken_mats_light_low = 0
if self.awaken_mats_dark_high is None:
self.awaken_mats_dark_high = 0
if self.awaken_mats_dark_mid is None:
self.awaken_mats_dark_mid = 0
if self.awaken_mats_dark_low is None:
self.awaken_mats_dark_low = 0
if self.awaken_mats_magic_high is None:
self.awaken_mats_magic_high = 0
if self.awaken_mats_magic_mid is None:
self.awaken_mats_magic_mid = 0
if self.awaken_mats_magic_low is None:
self.awaken_mats_magic_low = 0
if self.raw_hp:
self.base_hp = self._calculate_actual_stat(
self.raw_hp,
self.base_stars,
self.max_level_from_stars(self.base_stars)
) * 15
self.max_lvl_hp = self.actual_hp(6, 40)
if self.raw_attack:
self.base_attack = self._calculate_actual_stat(
self.raw_attack,
self.base_stars,
self.max_level_from_stars(self.base_stars)
)
self.max_lvl_attack = self.actual_attack(6, 40)
if self.raw_defense:
self.base_defense = self._calculate_actual_stat(
self.raw_defense,
self.base_stars,
self.max_level_from_stars(self.base_stars)
)
self.max_lvl_defense = self.actual_defense(6, 40)
if self.is_awakened and self.awakens_from:
self.bestiary_slug = self.awakens_from.bestiary_slug
else:
if self.awakens_to is not None:
self.bestiary_slug = slugify(" ".join([str(self.com2us_id), self.element, self.name, self.awakens_to.name]))
else:
self.bestiary_slug = slugify(" ".join([str(self.com2us_id), self.element, self.name]))
super(Monster, self).save(*args, **kwargs)
class Meta:
ordering = ['name', 'element']
def __str__(self):
if self.is_awakened:
return self.name
else:
return self.name + ' (' + self.element.capitalize() + ')'
class AwakenCost(ItemQuantity):
monster = models.ForeignKey(Monster, on_delete=models.CASCADE)
class AwakenBonusType(IntEnum):
NONE = 0
STAT_BONUS = 1
NEW_SKILL = 2
LEADER_SKILL = 3
STRENGTHEN_SKILL = 4
SECONDARY_AWAKENING = 6
ONLY_AWAKENED = 7
class MonsterCraftCost(ItemQuantity):
monster = models.ForeignKey(Monster, on_delete=models.CASCADE)
class Fusion(models.Model):
product = models.OneToOneField('Monster', on_delete=models.CASCADE, related_name='fusion')
cost = models.IntegerField()
ingredients = models.ManyToManyField('Monster', related_name='fusion_ingredient_for')
meta_order = models.IntegerField(db_index=True, default=0)
def __str__(self):
return str(self.product) + ' Fusion'
class Meta:
ordering = ['meta_order']
def sub_fusion_available(self):
return Fusion.objects.filter(product__in=self.ingredients.values_list('awakens_from__pk', flat=True)).exists()
def total_awakening_cost(self, owned_ingredients=None):
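        # Sum the awakening essences still required for this fusion; ingredients
        # already owned (if passed in) are excluded, and each remaining ingredient
        # contributes the essence cost of its unawakened form.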
cost = {
'magic': {
'low': 0,
'mid': 0,
'high': 0,
},
'fire': {
'low': 0,
'mid': 0,
'high': 0,
},
'water': {
'low': 0,
'mid': 0,
'high': 0,
},
'wind': {
'low': 0,
'mid': 0,
'high': 0,
},
'light': {
'low': 0,
'mid': 0,
'high': 0,
},
'dark': {
'low': 0,
'mid': 0,
'high': 0,
},
}
if owned_ingredients:
qs = self.ingredients.exclude(pk__in=[o.monster.pk for o in owned_ingredients])
else:
qs = self.ingredients.all()
for ingredient in qs:
if ingredient.awakens_from:
cost['magic']['low'] += ingredient.awakens_from.awaken_mats_magic_low
cost['magic']['mid'] += ingredient.awakens_from.awaken_mats_magic_mid
cost['magic']['high'] += ingredient.awakens_from.awaken_mats_magic_high
cost['fire']['low'] += ingredient.awakens_from.awaken_mats_fire_low
cost['fire']['mid'] += ingredient.awakens_from.awaken_mats_fire_mid
cost['fire']['high'] += ingredient.awakens_from.awaken_mats_fire_high
cost['water']['low'] += ingredient.awakens_from.awaken_mats_water_low
cost['water']['mid'] += ingredient.awakens_from.awaken_mats_water_mid
cost['water']['high'] += ingredient.awakens_from.awaken_mats_water_high
cost['wind']['low'] += ingredient.awakens_from.awaken_mats_wind_low
cost['wind']['mid'] += ingredient.awakens_from.awaken_mats_wind_mid
cost['wind']['high'] += ingredient.awakens_from.awaken_mats_wind_high
cost['light']['low'] += ingredient.awakens_from.awaken_mats_light_low
cost['light']['mid'] += ingredient.awakens_from.awaken_mats_light_mid
cost['light']['high'] += ingredient.awakens_from.awaken_mats_light_high
cost['dark']['low'] += ingredient.awakens_from.awaken_mats_dark_low
cost['dark']['mid'] += ingredient.awakens_from.awaken_mats_dark_mid
cost['dark']['high'] += ingredient.awakens_from.awaken_mats_dark_high
return cost
|
|
from . import ast
from ..compat import str_type
from .error import LanguageError
from .lexer import Lexer, TokenKind, get_token_desc, get_token_kind_desc
from .source import Source
__all__ = ['parse']
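# Illustrative use: parse('{ me { name } }') returns an ast.Document whose
# definitions list holds the parsed operations; no_location=True omits loc info
# entirely, while no_source=True keeps only start/end offsets in each loc.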
def parse(source, **kwargs):
"""Given a GraphQL source, parses it into a Document."""
options = {'no_location': False, 'no_source': False}
options.update(kwargs)
source_obj = source
if isinstance(source, str_type):
source_obj = Source(source)
parser = Parser(source_obj, options)
return parse_document(parser)
class Parser(object):
def __init__(self, source, options):
self.lexer = Lexer(source)
self.source = source
self.options = options
self.prev_end = 0
self.token = self.lexer.next_token()
def loc(parser, start):
"""Returns a location object, used to identify the place in
the source that created a given parsed object."""
if parser.options['no_location']:
return None
if parser.options['no_source']:
return {
'start': start,
'end': parser.prev_end
}
return {
'start': start,
'end': parser.prev_end,
'source': parser.source
}
def advance(parser):
"""Moves the internal parser object to the next lexed token."""
prev_end = parser.token.end
parser.prev_end = prev_end
parser.token = parser.lexer.next_token(prev_end)
def peek(parser, kind):
"""Determines if the next token is of a given kind"""
return parser.token.kind == kind
def skip(parser, kind):
"""If the next token is of the given kind, return true after advancing
the parser. Otherwise, do not change the parser state
and return False."""
match = parser.token.kind == kind
if match:
advance(parser)
return match
def expect(parser, kind):
    """If the next token is of the given kind, return that token after
    advancing the parser. Otherwise, leave the parser state unchanged and
    raise a LanguageError."""
token = parser.token
if token.kind == kind:
advance(parser)
return token
raise LanguageError(
parser.source,
token.start,
u'Expected {}, found {}'.format(
get_token_kind_desc(kind),
get_token_desc(token)
)
)
def expect_keyword(parser, value):
    """If the next token is a keyword with the given value, return that
    token after advancing the parser. Otherwise, leave the parser state
    unchanged and raise a LanguageError."""
token = parser.token
if token.kind == TokenKind.NAME and token.value == value:
advance(parser)
return token
raise LanguageError(
parser.source,
token.start,
u'Expected "{}", found {}'.format(value, get_token_desc(token))
)
def unexpected(parser, at_token=None):
"""Helper function for creating an error when an unexpected lexed token
is encountered."""
token = at_token or parser.token
return LanguageError(
parser.source,
token.start,
u'Unexpected {}'.format(get_token_desc(token))
)
def any(parser, open_kind, parse_fn, close_kind):
"""Returns a possibly empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token."""
expect(parser, open_kind)
nodes = []
while not skip(parser, close_kind):
nodes.append(parse_fn(parser))
return nodes
def many(parser, open_kind, parse_fn, close_kind):
"""Returns a non-empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token."""
expect(parser, open_kind)
nodes = [parse_fn(parser)]
while not skip(parser, close_kind):
nodes.append(parse_fn(parser))
return nodes
def parse_name(parser):
"""Converts a name lex token into a name parse node."""
token = expect(parser, TokenKind.NAME)
return ast.Name(
value=token.value,
loc=loc(parser, token.start)
)
# Implements the parsing rules in the Document section.
def parse_document(parser):
start = parser.token.start
definitions = []
while True:
if peek(parser, TokenKind.BRACE_L):
definitions.append(parse_operation_definition(parser))
elif peek(parser, TokenKind.NAME):
if parser.token.value in ('query', 'mutation'):
definitions.append(parse_operation_definition(parser))
elif parser.token.value == 'fragment':
definitions.append(parse_fragment_definition(parser))
else:
raise unexpected(parser)
else:
raise unexpected(parser)
if skip(parser, TokenKind.EOF):
break
return ast.Document(
definitions=definitions,
loc=loc(parser, start)
)
# Implements the parsing rules in the Operations section.
def parse_operation_definition(parser):
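    # A bare selection set ("{ ... }" shorthand) is parsed as an anonymous
    # query operation.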
start = parser.token.start
if peek(parser, TokenKind.BRACE_L):
return ast.OperationDefinition(
operation='query',
name=None,
variable_definitions=None,
directives=[],
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
operation_token = expect(parser, TokenKind.NAME)
operation = operation_token.value
return ast.OperationDefinition(
operation=operation,
name=parse_name(parser),
variable_definitions=parse_variable_definitions(parser),
directives=parse_directives(parser),
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
def parse_variable_definitions(parser):
if peek(parser, TokenKind.PAREN_L):
return many(
parser,
TokenKind.PAREN_L,
parse_variable_definition,
TokenKind.PAREN_R
)
return []
def parse_variable_definition(parser):
start = parser.token.start
variable = parse_variable(parser)
type = (expect(parser, TokenKind.COLON), parse_type(parser))[1]
if skip(parser, TokenKind.EQUALS):
default_value = parse_value(parser, True)
else:
default_value = None
return ast.VariableDefinition(
variable=variable,
type=type,
default_value=default_value,
loc=loc(parser, start)
)
def parse_variable(parser):
start = parser.token.start
expect(parser, TokenKind.DOLLAR)
return ast.Variable(
name=parse_name(parser),
loc=loc(parser, start)
)
def parse_selection_set(parser):
start = parser.token.start
return ast.SelectionSet(
selections=many(parser, TokenKind.BRACE_L, parse_selection, TokenKind.BRACE_R),
loc=loc(parser, start)
)
def parse_selection(parser):
if peek(parser, TokenKind.SPREAD):
return parse_fragment(parser)
else:
return parse_field(parser)
def parse_field(parser):
# Corresponds to both Field and Alias in the spec
start = parser.token.start
name_or_alias = parse_name(parser)
if skip(parser, TokenKind.COLON):
alias = name_or_alias
name = parse_name(parser)
else:
alias = None
name = name_or_alias
arguments = parse_arguments(parser)
directives = parse_directives(parser)
if peek(parser, TokenKind.BRACE_L):
selection_set = parse_selection_set(parser)
else:
selection_set = None
return ast.Field(
alias=alias,
name=name,
arguments=arguments,
directives=directives,
selection_set=selection_set,
loc=loc(parser, start)
)
def parse_arguments(parser):
if peek(parser, TokenKind.PAREN_L):
return many(
parser, TokenKind.PAREN_L,
parse_argument, TokenKind.PAREN_R)
return []
def parse_argument(parser):
start = parser.token.start
return ast.Argument(
name=parse_name(parser),
value=(
expect(parser, TokenKind.COLON),
parse_value(parser, False))[1],
loc=loc(parser, start)
)
# Implements the parsing rules in the Fragments section.
def parse_fragment(parser):
# Corresponds to both FragmentSpread and InlineFragment in the spec
start = parser.token.start
expect(parser, TokenKind.SPREAD)
if parser.token.value == 'on':
advance(parser)
return ast.InlineFragment(
type_condition=parse_named_type(parser),
directives=parse_directives(parser),
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
return ast.FragmentSpread(
name=parse_name(parser),
directives=parse_directives(parser),
loc=loc(parser, start)
)
def parse_fragment_definition(parser):
start = parser.token.start
expect_keyword(parser, 'fragment')
return ast.FragmentDefinition(
name=parse_name(parser),
type_condition=(
expect_keyword(parser, 'on'),
parse_named_type(parser))[1],
directives=parse_directives(parser),
selection_set=parse_selection_set(parser),
loc=loc(parser, start)
)
# Implements the parsing rules in the Values section.
def parse_variable_value(parser):
return parse_value(parser, False)
def parse_const_value(parser):
return parse_value(parser, True)
def parse_value(parser, is_const):
token = parser.token
if token.kind == TokenKind.BRACKET_L:
return parse_array(parser, is_const)
elif token.kind == TokenKind.BRACE_L:
return parse_object(parser, is_const)
elif token.kind == TokenKind.INT:
advance(parser)
return ast.IntValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.FLOAT:
advance(parser)
return ast.FloatValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.STRING:
advance(parser)
return ast.StringValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.NAME:
advance(parser)
if token.value in ('true', 'false'):
return ast.BooleanValue(value=token.value == 'true', loc=loc(parser, token.start))
return ast.EnumValue(value=token.value, loc=loc(parser, token.start))
elif token.kind == TokenKind.DOLLAR:
if not is_const:
return parse_variable(parser)
raise unexpected(parser)
def parse_array(parser, is_const):
start = parser.token.start
if is_const:
item = parse_const_value
else:
item = parse_variable_value
return ast.ListValue(
values=any(
parser, TokenKind.BRACKET_L,
item, TokenKind.BRACKET_R),
loc=loc(parser, start)
)
def parse_object(parser, is_const):
start = parser.token.start
expect(parser, TokenKind.BRACE_L)
fields = []
while not skip(parser, TokenKind.BRACE_R):
fields.append(parse_object_field(parser, is_const))
return ast.ObjectValue(fields=fields, loc=loc(parser, start))
def parse_object_field(parser, is_const):
start = parser.token.start
return ast.ObjectField(
name=parse_name(parser),
value=(
expect(parser, TokenKind.COLON),
parse_value(parser, is_const))[1],
loc=loc(parser, start)
)
# Implements the parsing rules in the Directives section.
def parse_directives(parser):
directives = []
while peek(parser, TokenKind.AT):
directives.append(parse_directive(parser))
return directives
def parse_directive(parser):
start = parser.token.start
expect(parser, TokenKind.AT)
node = ast.Directive(
name=parse_name(parser),
arguments=parse_arguments(parser),
loc=loc(parser, start),
)
return node
# Implements the parsing rules in the Types section.
def parse_type(parser):
"""Handles the 'Type': TypeName, ListType, and NonNullType
parsing rules."""
start = parser.token.start
type = None
if skip(parser, TokenKind.BRACKET_L):
type = parse_type(parser)
expect(parser, TokenKind.BRACKET_R)
type = ast.ListType(type=type, loc=loc(parser, start))
else:
type = parse_named_type(parser)
if skip(parser, TokenKind.BANG):
return ast.NonNullType(type=type, loc=loc(parser, start))
return type
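# Worked example (comment only): given the type string "[String!]!", parse_type
# recurses as follows -- the outer call consumes '[', the inner call parses
# "String!" into NonNullType(NamedType("String")), the outer call then wraps the
# result in ListType after consuming ']' and, on the trailing '!', returns
# NonNullType(type=ListType(type=NonNullType(type=NamedType(name="String")))).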
def parse_named_type(parser):
start = parser.token.start
return ast.NamedType(
name=parse_name(parser),
loc=loc(parser, start),
)
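# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original excerpt): in upstream
# graphql-core these helpers back a public ``parse(source)`` entry point
# defined in the same parser module. Assuming that entry point is available,
# a query string can be turned into an AST roughly like this:
#
#     from graphql.language.parser import parse
#     from graphql.language.source import Source
#
#     document = parse(Source('{ user(id: 4) { name } }'))
#     # document.definitions[0] is an OperationDefinition whose selection_set
#     # was built by parse_selection_set() / parse_field() above.
# ---------------------------------------------------------------------------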
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Key range representation and splitting."""
import os
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
simplejson = None
from google.appengine.api import datastore
from google.appengine.api import namespace_manager
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
try:
from google.appengine.ext import ndb
except ImportError:
ndb = None
# It is acceptable to set key_range.ndb to the ndb module,
# imported through some other way (e.g. from the app dir).
class Error(Exception):
"""Base class for exceptions in this module."""
class KeyRangeError(Error):
"""Error while trying to generate a KeyRange."""
class SimplejsonUnavailableError(Error):
"""Error using json functionality with unavailable json and simplejson."""
def _IsNdbQuery(query):
return ndb is not None and isinstance(query, ndb.Query)
class KeyRange(object):
"""Represents a range of keys in the datastore.
A KeyRange object represents a key range
(key_start, include_start, key_end, include_end)
and a scan direction (KeyRange.DESC or KeyRange.ASC).
"""
DESC = "DESC"
ASC = "ASC"
def __init__(self,
key_start=None,
key_end=None,
direction=None,
include_start=True,
include_end=True,
namespace=None,
_app=None):
"""Initialize a KeyRange object.
Args:
key_start: The starting key for this range (db.Key or ndb.Key).
key_end: The ending key for this range (db.Key or ndb.Key).
direction: The direction of the query for this range.
include_start: Whether the start key should be included in the range.
include_end: Whether the end key should be included in the range.
namespace: The namespace for this range. If None then the current
namespace is used.
NOTE: If NDB keys are passed in, they are converted to db.Key
instances before being stored.
"""
if direction is None:
direction = KeyRange.ASC
assert direction in (KeyRange.ASC, KeyRange.DESC)
self.direction = direction
if ndb is not None:
if isinstance(key_start, ndb.Key):
key_start = key_start.to_old_key()
if isinstance(key_end, ndb.Key):
key_end = key_end.to_old_key()
self.key_start = key_start
self.key_end = key_end
self.include_start = include_start
self.include_end = include_end
if namespace is not None:
self.namespace = namespace
else:
self.namespace = namespace_manager.get_namespace()
self._app = _app
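# Illustrative sketch (comment only; assumes an App Engine environment and a
# hypothetical kind name 'Foo'): a range covering every Foo entity with id
# strictly greater than 100 could be built as
#
#     kr = KeyRange(key_start=db.Key.from_path('Foo', 100),
#                   key_end=None,
#                   include_start=False)
#
# NDB keys work as well; they are converted with to_old_key() as shown above.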
def __str__(self):
if self.include_start:
left_side = "["
else:
left_side = "("
if self.include_end:
right_side = "]"
else:
right_side = ")"
return "%s%s%r to %r%s" % (self.direction, left_side, self.key_start,
self.key_end, right_side)
def __repr__(self):
return ("key_range.KeyRange(key_start=%r,key_end=%r,direction=%r,"
"include_start=%r,include_end=%r, namespace=%r)") % (
self.key_start,
self.key_end,
self.direction,
self.include_start,
self.include_end,
self.namespace)
def advance(self, key):
"""Updates the start of the range immediately past the specified key.
Args:
key: A db.Key or ndb.Key.
"""
self.include_start = False
if ndb is not None:
if isinstance(key, ndb.Key):
key = key.to_old_key()
self.key_start = key
def filter_query(self, query, filters=None):
"""Add query filter to restrict to this key range.
Args:
query: A db.Query or ndb.Query instance.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
The input query restricted to this key range.
"""
if ndb is not None:
if _IsNdbQuery(query):
return self.filter_ndb_query(query, filters=filters)
assert not _IsNdbQuery(query)
if filters:
for f in filters:
query.filter("%s %s" % (f[0], f[1]), f[2])
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query.filter("__key__ %s" % start_comparator, self.key_start)
if self.key_end:
query.filter("__key__ %s" % end_comparator, self.key_end)
return query
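# Illustrative sketch (comment only, not from the original module): applying a
# KeyRange to an ordinary db.Query for a hypothetical model class FooModel --
#
#     q = FooModel.all()
#     q = kr.filter_query(q, filters=[('status', '=', 'active')])
#
# The user filters are applied first, then "__key__ >=" / "__key__ <=" (or the
# strict variants, per include_start / include_end) restrict q to this range.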
def filter_ndb_query(self, query, filters=None):
"""Add query filter to restrict to this key range.
Args:
query: An ndb.Query instance.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
The input query restricted to this key range.
"""
assert _IsNdbQuery(query)
if filters:
for f in filters:
query = query.filter(ndb.FilterNode(*f))
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query = query.filter(ndb.FilterNode("__key__",
start_comparator,
self.key_start))
if self.key_end:
query = query.filter(ndb.FilterNode("__key__",
end_comparator,
self.key_end))
return query
def filter_datastore_query(self, query, filters=None):
"""Add query filter to restrict to this key range.
Args:
query: A datastore.Query instance.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
The input query restricted to this key range.
"""
assert isinstance(query, datastore.Query)
if filters:
for f in filters:
query.update({"%s %s" % (f[0], f[1]): f[2]})
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query.update({"__key__ %s" % start_comparator: self.key_start})
if self.key_end:
query.update({"__key__ %s" % end_comparator: self.key_end})
return query
def __get_direction(self, asc, desc):
"""Check that self.direction is in (KeyRange.ASC, KeyRange.DESC).
Args:
asc: Argument to return if self.direction is KeyRange.ASC
desc: Argument to return if self.direction is KeyRange.DESC
Returns:
asc or desc appropriately
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
if self.direction == KeyRange.ASC:
return asc
elif self.direction == KeyRange.DESC:
return desc
else:
raise KeyRangeError("KeyRange direction unexpected: %s", self.direction)
def make_directed_query(self, kind_class, keys_only=False):
"""Construct a query for this key range, including the scan direction.
Args:
kind_class: A kind implementation class (a subclass of either
db.Model or ndb.Model).
keys_only: bool, default False, use keys_only on Query?
Returns:
A db.Query or ndb.Query instance (corresponding to kind_class).
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
if ndb is not None:
if issubclass(kind_class, ndb.Model):
return self.make_directed_ndb_query(kind_class, keys_only=keys_only)
assert self._app is None, '_app is not supported for db.Query'
direction = self.__get_direction("", "-")
query = db.Query(kind_class, namespace=self.namespace, keys_only=keys_only)
query.order("%s__key__" % direction)
query = self.filter_query(query)
return query
def make_directed_ndb_query(self, kind_class, keys_only=False):
"""Construct an NDB query for this key range, including the scan direction.
Args:
kind_class: An ndb.Model subclass.
keys_only: bool, default False, use keys_only on Query?
Returns:
An ndb.Query instance.
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
assert issubclass(kind_class, ndb.Model)
if keys_only:
default_options = ndb.QueryOptions(keys_only=True)
else:
default_options = None
query = kind_class.query(app=self._app,
namespace=self.namespace,
default_options=default_options)
query = self.filter_ndb_query(query)
if self.__get_direction(True, False):
query = query.order(kind_class._key)
else:
query = query.order(-kind_class._key)
return query
def make_directed_datastore_query(self, kind, keys_only=False):
"""Construct a query for this key range, including the scan direction.
Args:
kind: A string.
keys_only: bool, default False, use keys_only on Query?
Returns:
A datastore.Query instance.
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
direction = self.__get_direction(datastore.Query.ASCENDING,
datastore.Query.DESCENDING)
query = datastore.Query(kind, _app=self._app, keys_only=keys_only)
query.Order(("__key__", direction))
query = self.filter_datastore_query(query)
return query
def make_ascending_query(self, kind_class, keys_only=False, filters=None):
"""Construct a query for this key range without setting the scan direction.
Args:
kind_class: A kind implementation class (a subclass of either
db.Model or ndb.Model).
keys_only: bool, default False, query only for keys.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
A db.Query or ndb.Query instance (corresponding to kind_class).
"""
if ndb is not None:
if issubclass(kind_class, ndb.Model):
return self.make_ascending_ndb_query(
kind_class, keys_only=keys_only, filters=filters)
assert self._app is None, '_app is not supported for db.Query'
query = db.Query(kind_class, namespace=self.namespace, keys_only=keys_only)
query.order("__key__")
query = self.filter_query(query, filters=filters)
return query
def make_ascending_ndb_query(self, kind_class, keys_only=False, filters=None):
"""Construct an NDB query for this key range, without the scan direction.
Args:
kind_class: An ndb.Model subclass.
keys_only: bool, default False, query only for keys.
Returns:
An ndb.Query instance.
"""
assert issubclass(kind_class, ndb.Model)
if keys_only:
default_options = ndb.QueryOptions(keys_only=True)
else:
default_options = None
query = kind_class.query(app=self._app,
namespace=self.namespace,
default_options=default_options)
query = self.filter_ndb_query(query, filters=filters)
query = query.order(kind_class._key)
return query
def make_ascending_datastore_query(self, kind, keys_only=False, filters=None):
"""Construct a query for this key range without setting the scan direction.
Args:
kind: A string.
keys_only: bool, default False, use keys_only on Query?
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
A datastore.Query instance.
"""
query = datastore.Query(kind,
namespace=self.namespace,
_app=self._app,
keys_only=keys_only)
query.Order(("__key__", datastore.Query.ASCENDING))
query = self.filter_datastore_query(query, filters=filters)
return query
def split_range(self, batch_size=0):
"""Split this key range into a list of at most two ranges.
This method attempts to split the key range approximately in half.
Numeric ranges are split in the middle into two equal ranges and
string ranges are split lexicographically in the middle. If the
key range is smaller than batch_size it is left unsplit.
Note that splitting is done without knowledge of the distribution
of actual entities in the key range, so there is no guarantee (nor
any particular reason to believe) that the entities of the range
are evenly split.
Args:
batch_size: The maximum size of a key range that should not be split.
Returns:
A list of one or two key ranges covering the same space as this range.
"""
key_start = self.key_start
key_end = self.key_end
include_start = self.include_start
include_end = self.include_end
key_pairs = []
if not key_start:
key_pairs.append((key_start, include_start, key_end, include_end,
KeyRange.ASC))
elif not key_end:
key_pairs.append((key_start, include_start, key_end, include_end,
KeyRange.DESC))
else:
key_split = KeyRange.split_keys(key_start, key_end, batch_size)
first_include_end = True
if key_split == key_start:
first_include_end = first_include_end and include_start
key_pairs.append((key_start, include_start,
key_split, first_include_end,
KeyRange.DESC))
second_include_end = include_end
if key_split == key_end:
second_include_end = False
key_pairs.append((key_split, False,
key_end, second_include_end,
KeyRange.ASC))
ranges = [KeyRange(key_start=start,
include_start=include_start,
key_end=end,
include_end=include_end,
direction=direction,
namespace=self.namespace,
_app=self._app)
for (start, include_start, end, include_end, direction)
in key_pairs]
return ranges
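# Illustrative sketch (comment only): splitting a bounded range roughly in half
# for two parallel workers, assuming kr has both endpoints set --
#
#     lower, upper = kr.split_range(batch_size=0)
#
# When one endpoint is unbounded (None), split_range returns a single range, so
# callers should not assume exactly two results.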
def __hash__(self):
raise TypeError('KeyRange is unhashable')
def __cmp__(self, other):
"""Compare two key ranges.
Key ranges with a value of None for key_start or key_end, are always
considered to have include_start=False or include_end=False, respectively,
when comparing. Since None indicates an unbounded side of the range,
the include specifier is meaningless. The ordering generated is total
but somewhat arbitrary.
Args:
other: An object to compare to this one.
Returns:
-1: if this key range is less than other.
0: if this key range is equal to other.
1: if this key range is greater than other.
"""
if not isinstance(other, KeyRange):
return 1
self_list = [self.key_start, self.key_end, self.direction,
self.include_start, self.include_end, self._app,
self.namespace]
if not self.key_start:
self_list[3] = False
if not self.key_end:
self_list[4] = False
other_list = [other.key_start,
other.key_end,
other.direction,
other.include_start,
other.include_end,
other._app,
other.namespace]
if not other.key_start:
other_list[3] = False
if not other.key_end:
other_list[4] = False
return cmp(self_list, other_list)
@staticmethod
def bisect_string_range(start, end):
"""Returns a string that is approximately in the middle of the range.
(start, end) is treated as a string range, and it is assumed
start <= end in the usual lexicographic string ordering. The output key
mid is guaranteed to satisfy start <= mid <= end.
The method proceeds by comparing initial characters of start and
end. When the characters are equal, they are appended to the mid
string. In the first place that the characters differ, the
differing characters are averaged and this average is appended to
the mid string. If averaging resulted in rounding down, an
additional character is added to the mid string to make up for the
rounding down. This extra step is necessary for correctness in
the case that the average of the two characters is equal to the
character in the start string.
This method makes the assumption that most keys are ascii and it
attempts to perform splitting within the ascii range when that
results in a valid split.
Args:
start: A string.
end: A string such that start <= end.
Returns:
A string mid such that start <= mid <= end.
"""
if start == end:
return start
start += "\0"
end += "\0"
midpoint = []
expected_max = 127
for i in range(min(len(start), len(end))):
if start[i] == end[i]:
midpoint.append(start[i])
else:
ord_sum = ord(start[i]) + ord(end[i])
midpoint.append(chr(ord_sum / 2))
if ord_sum % 2:
if len(start) > i + 1:
ord_start = ord(start[i+1])
else:
ord_start = 0
if ord_start < expected_max:
ord_split = (expected_max + ord_start) / 2
else:
ord_split = (0xFFFF + ord_start) / 2
midpoint.append(chr(ord_split))
break
return "".join(midpoint)
@staticmethod
def split_keys(key_start, key_end, batch_size):
"""Return a key that is between key_start and key_end inclusive.
This method compares components of the ancestor paths of key_start
and key_end. The first place in the path that differs is
approximately split in half. If the kind components differ, a new
non-existent kind halfway between the two is used to split the
space. If the id_or_name components differ, then a new id_or_name
that is halfway between the two is selected. If the lower
id_or_name is numeric and the upper id_or_name is a string, then
the minimum string key u'\0' is used as the split id_or_name. The
key that is returned is the shared portion of the ancestor path
followed by the generated split component.
Args:
key_start: A db.Key or ndb.Key instance for the lower end of a range.
key_end: A db.Key or ndb.Key instance for the upper end of a range.
batch_size: The maximum size of a range that should not be split.
Returns:
A db.Key instance, k, such that key_start <= k <= key_end.
NOTE: Even though ndb.Key instances are accepted as arguments,
the return value is always a db.Key instance.
"""
if ndb is not None:
if isinstance(key_start, ndb.Key):
key_start = key_start.to_old_key()
if isinstance(key_end, ndb.Key):
key_end = key_end.to_old_key()
assert key_start.app() == key_end.app()
assert key_start.namespace() == key_end.namespace()
path1 = key_start.to_path()
path2 = key_end.to_path()
len1 = len(path1)
len2 = len(path2)
assert len1 % 2 == 0
assert len2 % 2 == 0
out_path = []
min_path_len = min(len1, len2) / 2
for i in range(min_path_len):
kind1 = path1[2*i]
kind2 = path2[2*i]
if kind1 != kind2:
split_kind = KeyRange.bisect_string_range(kind1, kind2)
out_path.append(split_kind)
out_path.append(chr(0))
break
last = (len1 == len2 == 2*(i + 1))
id_or_name1 = path1[2*i + 1]
id_or_name2 = path2[2*i + 1]
id_or_name_split = KeyRange._split_id_or_name(
id_or_name1, id_or_name2, batch_size, last)
if id_or_name1 == id_or_name_split:
out_path.append(kind1)
out_path.append(id_or_name1)
else:
out_path.append(kind1)
out_path.append(id_or_name_split)
break
return db.Key.from_path(
*out_path,
**{"_app": key_start.app(), "namespace": key_start.namespace()})
@staticmethod
def _split_id_or_name(id_or_name1, id_or_name2, batch_size, maintain_batches):
"""Return an id_or_name that is between id_or_name1 an id_or_name2.
Attempts to split the range [id_or_name1, id_or_name2] in half,
unless maintain_batches is true and the size of the range
[id_or_name1, id_or_name2] is less than or equal to batch_size.
Args:
id_or_name1: A number or string or the id_or_name component of a key
id_or_name2: A number or string or the id_or_name component of a key
batch_size: The range size that will not be split if maintain_batches
is true.
maintain_batches: A boolean for whether to keep small ranges intact.
Returns:
An id_or_name such that id_or_name1 <= id_or_name <= id_or_name2.
"""
if (isinstance(id_or_name1, int) and
isinstance(id_or_name2, int)):
if not maintain_batches or id_or_name2 - id_or_name1 > batch_size:
return (id_or_name1 + id_or_name2) / 2
else:
return id_or_name1
elif (isinstance(id_or_name1, str) and
isinstance(id_or_name2, str)):
return KeyRange.bisect_string_range(id_or_name1, id_or_name2)
else:
if (not isinstance(id_or_name1, int) or
not isinstance(id_or_name2, str)):
raise KeyRangeError("Wrong key order: %r, %r" %
(id_or_name1, id_or_name2))
zero_ch = chr(0)
if id_or_name2 == zero_ch:
return (id_or_name1 + 2**63 - 1) / 2
return zero_ch
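# Illustrative sketch (comment only): for two numeric ids the split point is
# simply their integer midpoint (unless maintain_batches keeps a small range
# intact), e.g. KeyRange._split_id_or_name(10, 20, 0, False) == 15.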
@staticmethod
def guess_end_key(kind,
key_start,
probe_count=30,
split_rate=5):
"""Guess the end of a key range with a binary search of probe queries.
When the 'key_start' parameter has a key hierarchy, this function will
only determine the key range for keys in a similar hierarchy. That means
if the keys are in the form:
kind=Foo, name=bar/kind=Stuff, name=meep
only this range will be probed:
kind=Foo, name=*/kind=Stuff, name=*
That means other entities of kind 'Stuff' that are children of another
parent entity kind will be skipped:
kind=Other, name=cookie/kind=Stuff, name=meep
Args:
key_start: The starting key of the search range. In most cases this
should be id = 0 or name = '\0'. May be db.Key or ndb.Key.
kind: String name of the entity kind.
probe_count: Optional, how many probe queries to run.
split_rate: Exponential rate to use for splitting the range on the
way down from the full key space. For smaller ranges this should
be higher so more of the keyspace is skipped on initial descent.
Returns:
A db.Key that is guaranteed to be at least as high as the highest
key existing for this Kind. Doing a query between 'key_start' and
this returned Key (inclusive) will contain all entities of this Kind.
NOTE: Even though an ndb.Key instance is accepted as argument,
the return value is always a db.Key instance.
"""
if ndb is not None:
if isinstance(key_start, ndb.Key):
key_start = key_start.to_old_key()
app = key_start.app()
namespace = key_start.namespace()
full_path = key_start.to_path()
for index, piece in enumerate(full_path):
if index % 2 == 0:
continue
elif isinstance(piece, str):
full_path[index] = "\xffff"
else:
full_path[index] = 2**63 - 1
key_end = db.Key.from_path(*full_path,
**{"_app": app, "namespace": namespace})
split_key = key_end
for i in range(probe_count):
for j in range(split_rate):
split_key = KeyRange.split_keys(key_start, split_key, 1)
results = datastore.Query(
kind,
{"__key__ >": split_key},
namespace=namespace,
_app=app,
keys_only=True).Get(1)
if results:
if results[0].name() and not key_start.name():
return KeyRange.guess_end_key(
kind, results[0], probe_count - 1, split_rate)
else:
split_rate = 1
key_start = results[0]
split_key = key_end
else:
key_end = split_key
return key_end
@classmethod
def compute_split_points(cls, kind, count):
"""Computes a set of KeyRanges that are split points for a kind.
Args:
kind: String with the entity kind to split.
count: Number of non-overlapping KeyRanges to generate.
Returns:
A list of KeyRange objects that are non-overlapping. At most "count" + 1
KeyRange objects will be returned. At least one will be returned.
"""
query = datastore.Query(kind=kind, keys_only=True)
query.Order("__scatter__")
random_keys = query.Get(count)
if not random_keys:
return [cls()]
random_keys.sort()
key_ranges = []
key_ranges.append(cls(
key_start=None,
key_end=random_keys[0],
direction=cls.ASC,
include_start=False,
include_end=False))
for i in range(0, len(random_keys) - 1):
key_ranges.append(cls(
key_start=random_keys[i],
key_end=random_keys[i + 1],
direction=cls.ASC,
include_start=True,
include_end=False))
key_ranges.append(cls(
key_start=random_keys[-1],
key_end=None,
direction=cls.ASC,
include_start=True,
include_end=False))
return key_ranges
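# Illustrative sketch (comment only; assumes a populated datastore and a
# hypothetical kind name 'Foo'): carving the Foo keyspace into up to 5
# non-overlapping ranges for parallel scanning --
#
#     ranges = KeyRange.compute_split_points('Foo', 4)
#     for kr in ranges:
#         q = kr.make_ascending_datastore_query('Foo')
#         # ... iterate q.Run() in a separate worker ...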
def to_json(self):
"""Serialize KeyRange to json.
Returns:
string with KeyRange json representation.
"""
if simplejson is None:
raise SimplejsonUnavailableError(
"JSON functionality requires json or simplejson to be available")
def key_to_str(key):
if key:
return str(key)
else:
return None
obj_dict = {
"direction": self.direction,
"key_start": key_to_str(self.key_start),
"key_end": key_to_str(self.key_end),
"include_start": self.include_start,
"include_end": self.include_end,
"namespace": self.namespace,
}
if self._app:
obj_dict["_app"] = self._app
return simplejson.dumps(obj_dict, sort_keys=True)
@staticmethod
def from_json(json_str):
"""Deserialize KeyRange from its json representation.
Args:
json_str: string with json representation created by key_range_to_json.
Returns:
deserialized KeyRange instance.
"""
if simplejson is None:
raise SimplejsonUnavailableError(
"JSON functionality requires json or simplejson to be available")
def key_from_str(key_str):
if key_str:
return db.Key(key_str)
else:
return None
json = simplejson.loads(json_str)
return KeyRange(key_from_str(json["key_start"]),
key_from_str(json["key_end"]),
json["direction"],
json["include_start"],
json["include_end"],
json.get("namespace"),
_app=json.get("_app"))
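if __name__ == "__main__":
    # Hedged round-trip sketch (not part of the original module). It assumes
    # the App Engine SDK is importable (e.g. under dev_appserver or the
    # testbed) and that json/simplejson is available; both key endpoints are
    # left unbounded so no datastore access or application id is required.
    kr = KeyRange(direction=KeyRange.DESC, include_start=False)
    encoded = kr.to_json()                 # JSON string with sorted keys
    restored = KeyRange.from_json(encoded)
    assert restored.direction == KeyRange.DESC
    assert restored.include_start is False
    assert restored.key_start is None and restored.key_end is None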
|
|
# -*- coding: utf-8 -*-
"""
banner
Description goes here...
:copyright: (c) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, \
NoAlertPresentException
from base import Selenium2OnSauce
class Banner(Selenium2OnSauce):
def test_advanced_skills(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/advanced-skills/")
self.assertTrue(self.is_element_present(By.ID, "wfmis"))
def test_advanced_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/advanced-skills/")
def test_advanced_skills_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/advanced-skills/")
def test_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/")
def test_central_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/central-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_company_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/company-research/")
def test_company_training_provider(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/company-training-programs/")
def test_courseware(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_developing_tomorrow(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/")
def test_download(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/download/")
def test_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/epp/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_erd(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/")
def test_event(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/")
def test_executive_summary(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/executive-summary/")
def test_foundation_advance_skills_development(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/")
def test_foundation_convocation_banner(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[3]"))
driver.get("http://pursuite.openlabs.us/about-us/ssc-nasscom/vision-mission/")
def test_foundation_skills_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/")
def test_foundation_skills_ed(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/erd/foundation-skills/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_foundation_skills_epp(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/foundation-skills/")
def test_full_course(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/full-course/")
def test_gbfs_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_government_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_government_training_program(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/")
def test_help_you_choose(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.LINK_TEXT, "Know More"))
def test_ict_academy_tamilnadu(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/ict-academy-tamilnadu/")
def test_il_fs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resougvrces/private-sector-training-programs/ilfs/")
def test_implementation_cycle_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/implementation-cycle/")
def test_interactive_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/interactive-tools/")
def test_it_initiative(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_it_ites(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/it-ites-initiativesprograms/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[7]/div"))
def test_listing_of_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/listing-programs/")
def test_nasscom_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/nasscom-research/")
def test_niit(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/niit/")
def test_obf_bpm(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/bpm/foundation-skills/gbfs/outcome-based-framework-gbfs/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "span.filetitle"))
def test_other_bodies_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/government-training-programs/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
def test_other_bodies(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/other-bodies/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[5]/div"))
def test_other_publication(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/other-publication/")
def test_policy_development(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/policy-development/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_private_sector_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/")
def test_program_registration(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/event-workforce-enablement/program-registration/")
def test_promotion_marketing(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/promotion-marketing/")
def test_read_only(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/foundation-advance-skills-development/foundation-skills/courseware/read-only/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_research(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
def test_skills_academy(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/skills-academy/")
def test_software_products(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/software-products/")
def test_ssc_training_programs(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/ssc-nasscom-training-programs/")
def test_state_government(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/research/government-research/state-government/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[4]/div"))
def test_talent_sprint(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/private-sector-training-programs/talent-sprint/")
def test_training_materials(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-materials/")
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, "div.mid-box-flip"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[2]/div"))
self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='contentAndSidebars']/div/div[2]/div[2]/div/div/div[3]/div"))
def test_training_that_helps_you(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/")
self.assertTrue(self.is_element_present(By.XPATH, "(//a[contains(text(),'Know More')])[2]"))
def test_training_tools(self):
driver = self.driver
driver.get("http://pursuite.openlabs.us/ssc-article/it-ites-initiative/developing-tomorrows-workforce-today/training-programs-tools-resources/training-tools/")
def is_element_present(self, how, what):
    try:
        self.driver.find_element(by=how, value=what)
    except NoSuchElementException:
        return False
    return True
def is_alert_present(self):
    try:
        self.driver.switch_to_alert()
    except NoAlertPresentException:
        return False
    return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
if __name__ == "__main__":
unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import NetworkManagementClientConfiguration
from .operations import ApplicationGatewaysOperations
from .operations import ApplicationSecurityGroupsOperations
from .operations import AvailableDelegationsOperations
from .operations import AvailableResourceGroupDelegationsOperations
from .operations import AvailableServiceAliasesOperations
from .operations import AzureFirewallsOperations
from .operations import AzureFirewallFqdnTagsOperations
from .operations import BastionHostsOperations
from .operations import NetworkManagementClientOperationsMixin
from .operations import DdosCustomPoliciesOperations
from .operations import DdosProtectionPlansOperations
from .operations import AvailableEndpointServicesOperations
from .operations import ExpressRouteCircuitAuthorizationsOperations
from .operations import ExpressRouteCircuitPeeringsOperations
from .operations import ExpressRouteCircuitConnectionsOperations
from .operations import PeerExpressRouteCircuitConnectionsOperations
from .operations import ExpressRouteCircuitsOperations
from .operations import ExpressRouteServiceProvidersOperations
from .operations import ExpressRouteCrossConnectionsOperations
from .operations import ExpressRouteCrossConnectionPeeringsOperations
from .operations import ExpressRoutePortsLocationsOperations
from .operations import ExpressRoutePortsOperations
from .operations import ExpressRouteLinksOperations
from .operations import FirewallPoliciesOperations
from .operations import FirewallPolicyRuleGroupsOperations
from .operations import IpAllocationsOperations
from .operations import IpGroupsOperations
from .operations import LoadBalancersOperations
from .operations import LoadBalancerBackendAddressPoolsOperations
from .operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations import InboundNatRulesOperations
from .operations import LoadBalancerLoadBalancingRulesOperations
from .operations import LoadBalancerOutboundRulesOperations
from .operations import LoadBalancerNetworkInterfacesOperations
from .operations import LoadBalancerProbesOperations
from .operations import NatGatewaysOperations
from .operations import NetworkInterfacesOperations
from .operations import NetworkInterfaceIPConfigurationsOperations
from .operations import NetworkInterfaceLoadBalancersOperations
from .operations import NetworkInterfaceTapConfigurationsOperations
from .operations import NetworkProfilesOperations
from .operations import NetworkSecurityGroupsOperations
from .operations import SecurityRulesOperations
from .operations import DefaultSecurityRulesOperations
from .operations import NetworkVirtualAppliancesOperations
from .operations import NetworkWatchersOperations
from .operations import PacketCapturesOperations
from .operations import ConnectionMonitorsOperations
from .operations import FlowLogsOperations
from .operations import Operations
from .operations import PrivateEndpointsOperations
from .operations import AvailablePrivateEndpointTypesOperations
from .operations import PrivateDnsZoneGroupsOperations
from .operations import PrivateLinkServicesOperations
from .operations import PublicIPAddressesOperations
from .operations import PublicIPPrefixesOperations
from .operations import RouteFiltersOperations
from .operations import RouteFilterRulesOperations
from .operations import RouteTablesOperations
from .operations import RoutesOperations
from .operations import SecurityPartnerProvidersOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from .operations import ServiceTagsOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import ResourceNavigationLinksOperations
from .operations import ServiceAssociationLinksOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualNetworkTapsOperations
from .operations import VirtualRoutersOperations
from .operations import VirtualRouterPeeringsOperations
from .operations import VirtualWansOperations
from .operations import VpnSitesOperations
from .operations import VpnSiteLinksOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VpnServerConfigurationsOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import VpnSiteLinkConnectionsOperations
from .operations import VpnLinkConnectionsOperations
from .operations import P2SVpnGatewaysOperations
from .operations import VpnServerConfigurationsAssociatedWithVirtualWanOperations
from .operations import VirtualHubRouteTableV2SOperations
from .operations import ExpressRouteGatewaysOperations
from .operations import ExpressRouteConnectionsOperations
from .operations import WebApplicationFirewallPoliciesOperations
from .. import models
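# Hedged usage sketch (not part of the generated file): the async client is
# normally constructed with a credential and a subscription id, following the
# standard azure-mgmt-network pattern. Constructor details beyond these two
# arguments are an assumption here; see the generated __init__ later in this
# class for the authoritative signature.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.v2020_03_01.aio import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     # e.g. async iteration: [vnet async for vnet in client.virtual_networks.list_all()]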
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
"""Network Client.
:ivar application_gateways: ApplicationGatewaysOperations operations
:vartype application_gateways: azure.mgmt.network.v2020_03_01.aio.operations.ApplicationGatewaysOperations
:ivar application_security_groups: ApplicationSecurityGroupsOperations operations
:vartype application_security_groups: azure.mgmt.network.v2020_03_01.aio.operations.ApplicationSecurityGroupsOperations
:ivar available_delegations: AvailableDelegationsOperations operations
:vartype available_delegations: azure.mgmt.network.v2020_03_01.aio.operations.AvailableDelegationsOperations
:ivar available_resource_group_delegations: AvailableResourceGroupDelegationsOperations operations
:vartype available_resource_group_delegations: azure.mgmt.network.v2020_03_01.aio.operations.AvailableResourceGroupDelegationsOperations
:ivar available_service_aliases: AvailableServiceAliasesOperations operations
:vartype available_service_aliases: azure.mgmt.network.v2020_03_01.aio.operations.AvailableServiceAliasesOperations
:ivar azure_firewalls: AzureFirewallsOperations operations
:vartype azure_firewalls: azure.mgmt.network.v2020_03_01.aio.operations.AzureFirewallsOperations
:ivar azure_firewall_fqdn_tags: AzureFirewallFqdnTagsOperations operations
:vartype azure_firewall_fqdn_tags: azure.mgmt.network.v2020_03_01.aio.operations.AzureFirewallFqdnTagsOperations
:ivar bastion_hosts: BastionHostsOperations operations
:vartype bastion_hosts: azure.mgmt.network.v2020_03_01.aio.operations.BastionHostsOperations
:ivar ddos_custom_policies: DdosCustomPoliciesOperations operations
:vartype ddos_custom_policies: azure.mgmt.network.v2020_03_01.aio.operations.DdosCustomPoliciesOperations
:ivar ddos_protection_plans: DdosProtectionPlansOperations operations
:vartype ddos_protection_plans: azure.mgmt.network.v2020_03_01.aio.operations.DdosProtectionPlansOperations
:ivar available_endpoint_services: AvailableEndpointServicesOperations operations
:vartype available_endpoint_services: azure.mgmt.network.v2020_03_01.aio.operations.AvailableEndpointServicesOperations
:ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
:vartype express_route_circuit_authorizations: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteCircuitAuthorizationsOperations
:ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
:vartype express_route_circuit_peerings: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteCircuitPeeringsOperations
:ivar express_route_circuit_connections: ExpressRouteCircuitConnectionsOperations operations
:vartype express_route_circuit_connections: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteCircuitConnectionsOperations
:ivar peer_express_route_circuit_connections: PeerExpressRouteCircuitConnectionsOperations operations
:vartype peer_express_route_circuit_connections: azure.mgmt.network.v2020_03_01.aio.operations.PeerExpressRouteCircuitConnectionsOperations
:ivar express_route_circuits: ExpressRouteCircuitsOperations operations
:vartype express_route_circuits: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteCircuitsOperations
:ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
:vartype express_route_service_providers: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteServiceProvidersOperations
:ivar express_route_cross_connections: ExpressRouteCrossConnectionsOperations operations
:vartype express_route_cross_connections: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteCrossConnectionsOperations
:ivar express_route_cross_connection_peerings: ExpressRouteCrossConnectionPeeringsOperations operations
:vartype express_route_cross_connection_peerings: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteCrossConnectionPeeringsOperations
:ivar express_route_ports_locations: ExpressRoutePortsLocationsOperations operations
:vartype express_route_ports_locations: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRoutePortsLocationsOperations
:ivar express_route_ports: ExpressRoutePortsOperations operations
:vartype express_route_ports: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRoutePortsOperations
:ivar express_route_links: ExpressRouteLinksOperations operations
:vartype express_route_links: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteLinksOperations
:ivar firewall_policies: FirewallPoliciesOperations operations
:vartype firewall_policies: azure.mgmt.network.v2020_03_01.aio.operations.FirewallPoliciesOperations
:ivar firewall_policy_rule_groups: FirewallPolicyRuleGroupsOperations operations
:vartype firewall_policy_rule_groups: azure.mgmt.network.v2020_03_01.aio.operations.FirewallPolicyRuleGroupsOperations
:ivar ip_allocations: IpAllocationsOperations operations
:vartype ip_allocations: azure.mgmt.network.v2020_03_01.aio.operations.IpAllocationsOperations
:ivar ip_groups: IpGroupsOperations operations
:vartype ip_groups: azure.mgmt.network.v2020_03_01.aio.operations.IpGroupsOperations
:ivar load_balancers: LoadBalancersOperations operations
:vartype load_balancers: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancersOperations
:ivar load_balancer_backend_address_pools: LoadBalancerBackendAddressPoolsOperations operations
:vartype load_balancer_backend_address_pools: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancerBackendAddressPoolsOperations
:ivar load_balancer_frontend_ip_configurations: LoadBalancerFrontendIPConfigurationsOperations operations
:vartype load_balancer_frontend_ip_configurations: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancerFrontendIPConfigurationsOperations
:ivar inbound_nat_rules: InboundNatRulesOperations operations
:vartype inbound_nat_rules: azure.mgmt.network.v2020_03_01.aio.operations.InboundNatRulesOperations
:ivar load_balancer_load_balancing_rules: LoadBalancerLoadBalancingRulesOperations operations
:vartype load_balancer_load_balancing_rules: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancerLoadBalancingRulesOperations
:ivar load_balancer_outbound_rules: LoadBalancerOutboundRulesOperations operations
:vartype load_balancer_outbound_rules: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancerOutboundRulesOperations
:ivar load_balancer_network_interfaces: LoadBalancerNetworkInterfacesOperations operations
:vartype load_balancer_network_interfaces: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancerNetworkInterfacesOperations
:ivar load_balancer_probes: LoadBalancerProbesOperations operations
:vartype load_balancer_probes: azure.mgmt.network.v2020_03_01.aio.operations.LoadBalancerProbesOperations
:ivar nat_gateways: NatGatewaysOperations operations
:vartype nat_gateways: azure.mgmt.network.v2020_03_01.aio.operations.NatGatewaysOperations
:ivar network_interfaces: NetworkInterfacesOperations operations
:vartype network_interfaces: azure.mgmt.network.v2020_03_01.aio.operations.NetworkInterfacesOperations
:ivar network_interface_ip_configurations: NetworkInterfaceIPConfigurationsOperations operations
:vartype network_interface_ip_configurations: azure.mgmt.network.v2020_03_01.aio.operations.NetworkInterfaceIPConfigurationsOperations
:ivar network_interface_load_balancers: NetworkInterfaceLoadBalancersOperations operations
:vartype network_interface_load_balancers: azure.mgmt.network.v2020_03_01.aio.operations.NetworkInterfaceLoadBalancersOperations
:ivar network_interface_tap_configurations: NetworkInterfaceTapConfigurationsOperations operations
:vartype network_interface_tap_configurations: azure.mgmt.network.v2020_03_01.aio.operations.NetworkInterfaceTapConfigurationsOperations
:ivar network_profiles: NetworkProfilesOperations operations
:vartype network_profiles: azure.mgmt.network.v2020_03_01.aio.operations.NetworkProfilesOperations
:ivar network_security_groups: NetworkSecurityGroupsOperations operations
:vartype network_security_groups: azure.mgmt.network.v2020_03_01.aio.operations.NetworkSecurityGroupsOperations
:ivar security_rules: SecurityRulesOperations operations
:vartype security_rules: azure.mgmt.network.v2020_03_01.aio.operations.SecurityRulesOperations
:ivar default_security_rules: DefaultSecurityRulesOperations operations
:vartype default_security_rules: azure.mgmt.network.v2020_03_01.aio.operations.DefaultSecurityRulesOperations
:ivar network_virtual_appliances: NetworkVirtualAppliancesOperations operations
:vartype network_virtual_appliances: azure.mgmt.network.v2020_03_01.aio.operations.NetworkVirtualAppliancesOperations
:ivar network_watchers: NetworkWatchersOperations operations
:vartype network_watchers: azure.mgmt.network.v2020_03_01.aio.operations.NetworkWatchersOperations
:ivar packet_captures: PacketCapturesOperations operations
:vartype packet_captures: azure.mgmt.network.v2020_03_01.aio.operations.PacketCapturesOperations
:ivar connection_monitors: ConnectionMonitorsOperations operations
:vartype connection_monitors: azure.mgmt.network.v2020_03_01.aio.operations.ConnectionMonitorsOperations
:ivar flow_logs: FlowLogsOperations operations
:vartype flow_logs: azure.mgmt.network.v2020_03_01.aio.operations.FlowLogsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.network.v2020_03_01.aio.operations.Operations
:ivar private_endpoints: PrivateEndpointsOperations operations
:vartype private_endpoints: azure.mgmt.network.v2020_03_01.aio.operations.PrivateEndpointsOperations
:ivar available_private_endpoint_types: AvailablePrivateEndpointTypesOperations operations
:vartype available_private_endpoint_types: azure.mgmt.network.v2020_03_01.aio.operations.AvailablePrivateEndpointTypesOperations
:ivar private_dns_zone_groups: PrivateDnsZoneGroupsOperations operations
:vartype private_dns_zone_groups: azure.mgmt.network.v2020_03_01.aio.operations.PrivateDnsZoneGroupsOperations
:ivar private_link_services: PrivateLinkServicesOperations operations
:vartype private_link_services: azure.mgmt.network.v2020_03_01.aio.operations.PrivateLinkServicesOperations
:ivar public_ip_addresses: PublicIPAddressesOperations operations
:vartype public_ip_addresses: azure.mgmt.network.v2020_03_01.aio.operations.PublicIPAddressesOperations
:ivar public_ip_prefixes: PublicIPPrefixesOperations operations
:vartype public_ip_prefixes: azure.mgmt.network.v2020_03_01.aio.operations.PublicIPPrefixesOperations
:ivar route_filters: RouteFiltersOperations operations
:vartype route_filters: azure.mgmt.network.v2020_03_01.aio.operations.RouteFiltersOperations
:ivar route_filter_rules: RouteFilterRulesOperations operations
:vartype route_filter_rules: azure.mgmt.network.v2020_03_01.aio.operations.RouteFilterRulesOperations
:ivar route_tables: RouteTablesOperations operations
:vartype route_tables: azure.mgmt.network.v2020_03_01.aio.operations.RouteTablesOperations
:ivar routes: RoutesOperations operations
:vartype routes: azure.mgmt.network.v2020_03_01.aio.operations.RoutesOperations
:ivar security_partner_providers: SecurityPartnerProvidersOperations operations
:vartype security_partner_providers: azure.mgmt.network.v2020_03_01.aio.operations.SecurityPartnerProvidersOperations
:ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
:vartype bgp_service_communities: azure.mgmt.network.v2020_03_01.aio.operations.BgpServiceCommunitiesOperations
:ivar service_endpoint_policies: ServiceEndpointPoliciesOperations operations
:vartype service_endpoint_policies: azure.mgmt.network.v2020_03_01.aio.operations.ServiceEndpointPoliciesOperations
:ivar service_endpoint_policy_definitions: ServiceEndpointPolicyDefinitionsOperations operations
:vartype service_endpoint_policy_definitions: azure.mgmt.network.v2020_03_01.aio.operations.ServiceEndpointPolicyDefinitionsOperations
:ivar service_tags: ServiceTagsOperations operations
:vartype service_tags: azure.mgmt.network.v2020_03_01.aio.operations.ServiceTagsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.network.v2020_03_01.aio.operations.UsagesOperations
:ivar virtual_networks: VirtualNetworksOperations operations
:vartype virtual_networks: azure.mgmt.network.v2020_03_01.aio.operations.VirtualNetworksOperations
:ivar subnets: SubnetsOperations operations
:vartype subnets: azure.mgmt.network.v2020_03_01.aio.operations.SubnetsOperations
:ivar resource_navigation_links: ResourceNavigationLinksOperations operations
:vartype resource_navigation_links: azure.mgmt.network.v2020_03_01.aio.operations.ResourceNavigationLinksOperations
:ivar service_association_links: ServiceAssociationLinksOperations operations
:vartype service_association_links: azure.mgmt.network.v2020_03_01.aio.operations.ServiceAssociationLinksOperations
:ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
:vartype virtual_network_peerings: azure.mgmt.network.v2020_03_01.aio.operations.VirtualNetworkPeeringsOperations
:ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
:vartype virtual_network_gateways: azure.mgmt.network.v2020_03_01.aio.operations.VirtualNetworkGatewaysOperations
:ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
:vartype virtual_network_gateway_connections: azure.mgmt.network.v2020_03_01.aio.operations.VirtualNetworkGatewayConnectionsOperations
:ivar local_network_gateways: LocalNetworkGatewaysOperations operations
:vartype local_network_gateways: azure.mgmt.network.v2020_03_01.aio.operations.LocalNetworkGatewaysOperations
:ivar virtual_network_taps: VirtualNetworkTapsOperations operations
:vartype virtual_network_taps: azure.mgmt.network.v2020_03_01.aio.operations.VirtualNetworkTapsOperations
:ivar virtual_routers: VirtualRoutersOperations operations
:vartype virtual_routers: azure.mgmt.network.v2020_03_01.aio.operations.VirtualRoutersOperations
:ivar virtual_router_peerings: VirtualRouterPeeringsOperations operations
:vartype virtual_router_peerings: azure.mgmt.network.v2020_03_01.aio.operations.VirtualRouterPeeringsOperations
:ivar virtual_wans: VirtualWansOperations operations
:vartype virtual_wans: azure.mgmt.network.v2020_03_01.aio.operations.VirtualWansOperations
:ivar vpn_sites: VpnSitesOperations operations
:vartype vpn_sites: azure.mgmt.network.v2020_03_01.aio.operations.VpnSitesOperations
:ivar vpn_site_links: VpnSiteLinksOperations operations
:vartype vpn_site_links: azure.mgmt.network.v2020_03_01.aio.operations.VpnSiteLinksOperations
:ivar vpn_sites_configuration: VpnSitesConfigurationOperations operations
:vartype vpn_sites_configuration: azure.mgmt.network.v2020_03_01.aio.operations.VpnSitesConfigurationOperations
:ivar vpn_server_configurations: VpnServerConfigurationsOperations operations
:vartype vpn_server_configurations: azure.mgmt.network.v2020_03_01.aio.operations.VpnServerConfigurationsOperations
:ivar virtual_hubs: VirtualHubsOperations operations
:vartype virtual_hubs: azure.mgmt.network.v2020_03_01.aio.operations.VirtualHubsOperations
:ivar hub_virtual_network_connections: HubVirtualNetworkConnectionsOperations operations
:vartype hub_virtual_network_connections: azure.mgmt.network.v2020_03_01.aio.operations.HubVirtualNetworkConnectionsOperations
:ivar vpn_gateways: VpnGatewaysOperations operations
:vartype vpn_gateways: azure.mgmt.network.v2020_03_01.aio.operations.VpnGatewaysOperations
:ivar vpn_connections: VpnConnectionsOperations operations
:vartype vpn_connections: azure.mgmt.network.v2020_03_01.aio.operations.VpnConnectionsOperations
:ivar vpn_site_link_connections: VpnSiteLinkConnectionsOperations operations
:vartype vpn_site_link_connections: azure.mgmt.network.v2020_03_01.aio.operations.VpnSiteLinkConnectionsOperations
:ivar vpn_link_connections: VpnLinkConnectionsOperations operations
:vartype vpn_link_connections: azure.mgmt.network.v2020_03_01.aio.operations.VpnLinkConnectionsOperations
:ivar p2_svpn_gateways: P2SVpnGatewaysOperations operations
:vartype p2_svpn_gateways: azure.mgmt.network.v2020_03_01.aio.operations.P2SVpnGatewaysOperations
:ivar vpn_server_configurations_associated_with_virtual_wan: VpnServerConfigurationsAssociatedWithVirtualWanOperations operations
:vartype vpn_server_configurations_associated_with_virtual_wan: azure.mgmt.network.v2020_03_01.aio.operations.VpnServerConfigurationsAssociatedWithVirtualWanOperations
:ivar virtual_hub_route_table_v2_s: VirtualHubRouteTableV2SOperations operations
:vartype virtual_hub_route_table_v2_s: azure.mgmt.network.v2020_03_01.aio.operations.VirtualHubRouteTableV2SOperations
:ivar express_route_gateways: ExpressRouteGatewaysOperations operations
:vartype express_route_gateways: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteGatewaysOperations
:ivar express_route_connections: ExpressRouteConnectionsOperations operations
:vartype express_route_connections: azure.mgmt.network.v2020_03_01.aio.operations.ExpressRouteConnectionsOperations
:ivar web_application_firewall_policies: WebApplicationFirewallPoliciesOperations operations
:vartype web_application_firewall_policies: azure.mgmt.network.v2020_03_01.aio.operations.WebApplicationFirewallPoliciesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_gateways = ApplicationGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_security_groups = ApplicationSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_delegations = AvailableDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_resource_group_delegations = AvailableResourceGroupDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_service_aliases = AvailableServiceAliasesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewalls = AzureFirewallsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewall_fqdn_tags = AzureFirewallFqdnTagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bastion_hosts = BastionHostsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_custom_policies = DdosCustomPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_protection_plans = DdosProtectionPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_endpoint_services = AvailableEndpointServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_connections = ExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.peer_express_route_circuit_connections = PeerExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuits = ExpressRouteCircuitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connections = ExpressRouteCrossConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connection_peerings = ExpressRouteCrossConnectionPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports_locations = ExpressRoutePortsLocationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports = ExpressRoutePortsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_links = ExpressRouteLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.firewall_policies = FirewallPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.firewall_policy_rule_groups = FirewallPolicyRuleGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ip_allocations = IpAllocationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ip_groups = IpGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancers = LoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_backend_address_pools = LoadBalancerBackendAddressPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_frontend_ip_configurations = LoadBalancerFrontendIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.inbound_nat_rules = InboundNatRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_load_balancing_rules = LoadBalancerLoadBalancingRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_outbound_rules = LoadBalancerOutboundRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_network_interfaces = LoadBalancerNetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_probes = LoadBalancerProbesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.nat_gateways = NatGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interfaces = NetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_ip_configurations = NetworkInterfaceIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_load_balancers = NetworkInterfaceLoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_tap_configurations = NetworkInterfaceTapConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_profiles = NetworkProfilesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_security_groups = NetworkSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_rules = SecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.default_security_rules = DefaultSecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_virtual_appliances = NetworkVirtualAppliancesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_watchers = NetworkWatchersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.packet_captures = PacketCapturesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.connection_monitors = ConnectionMonitorsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.flow_logs = FlowLogsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoints = PrivateEndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_private_endpoint_types = AvailablePrivateEndpointTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_dns_zone_groups = PrivateDnsZoneGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_services = PrivateLinkServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_addresses = PublicIPAddressesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_prefixes = PublicIPPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filters = RouteFiltersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filter_rules = RouteFilterRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_tables = RouteTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.routes = RoutesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_partner_providers = SecurityPartnerProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bgp_service_communities = BgpServiceCommunitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policies = ServiceEndpointPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policy_definitions = ServiceEndpointPolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_tags = ServiceTagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subnets = SubnetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_navigation_links = ResourceNavigationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_association_links = ServiceAssociationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.local_network_gateways = LocalNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_taps = VirtualNetworkTapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_routers = VirtualRoutersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_router_peerings = VirtualRouterPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_wans = VirtualWansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites = VpnSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_site_links = VpnSiteLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites_configuration = VpnSitesConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_server_configurations = VpnServerConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_hubs = VirtualHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.hub_virtual_network_connections = HubVirtualNetworkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_gateways = VpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_connections = VpnConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_site_link_connections = VpnSiteLinkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_link_connections = VpnLinkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.p2_svpn_gateways = P2SVpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_server_configurations_associated_with_virtual_wan = VpnServerConfigurationsAssociatedWithVirtualWanOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_hub_route_table_v2_s = VirtualHubRouteTableV2SOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_gateways = ExpressRouteGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_connections = ExpressRouteConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.web_application_firewall_policies = WebApplicationFirewallPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "NetworkManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
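# Usage sketch (illustrative, not part of the generated client): a minimal,
# hedged example of driving the async client above.  It assumes the
# azure-identity package is installed; the subscription id is a placeholder
# supplied by the caller (run with e.g. asyncio.run(...)).
async def _example_list_virtual_networks(subscription_id: str) -> None:
    from azure.identity.aio import DefaultAzureCredential  # assumed available
    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, subscription_id) as client:
        # list_all() returns an async pager over the subscription's virtual
        # networks; "async for" drains it lazily, page by page.
        async for vnet in client.virtual_networks.list_all():
            print(vnet.name)
    await credential.close()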
|
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
            except Exception:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
        except Exception:
return await resp.text()
    def send_request(self, method, relative_url, data=None, *, timeout=None, headers=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
        # Merge caller-supplied headers (e.g. the x-signature header built by
        # transfer_credit) with the default user-agent header.
        headers = dict(headers) if headers else {}
        if self.user_agent:
            headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
        Returns the TOS for the given billing plan as a text/plain unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Ask the server to cosign (partially sign) a transaction for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
        return self.send_request('post', relative_url, payload, headers=headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
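# Usage sketch (hedged, illustrative only): the module-level client above goes
# through Electrum's Network proxy, so a running Network instance is required;
# otherwise send_request() raises ErrorConnectingServer.  Fetching the terms
# of service is the simplest round-trip.
def _example_fetch_terms_of_service() -> str:
    # Issues GET <base_url>/tos with the default billing plan defined above.
    return server.get_terms_of_service()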
class Wallet_2fa(Multisig_Wallet):
plugin: 'TrustedCoinPlugin'
wallet_type = '2fa'
def __init__(self, db, storage, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, db, storage, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.db.get('trustedcoin_billing_addresses', {}),
'segwit': self.db.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.db)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(
self, *,
coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput],
fee=None,
change_addr: str = None,
is_sweep=False,
rbf=False) -> PartialTransaction:
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins=coins, outputs=o, fee=fee, change_addr=change_addr, rbf=rbf)
extra_fee = self.extra_fee() if not is_sweep else 0
if extra_fee:
address = self.billing_info['billing_address_segwit']
fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= extra_fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx: PartialTransaction, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_as_bytes().hex()
assert raw_tx[:10] == "70736274ff", f"bad magic. {raw_tx[:10]}"
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
received_raw_tx = r.get('transaction')
received_tx = Transaction(received_raw_tx)
tx.combine_with_other_psbt(received_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.db.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.db.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.db.write(self.storage)
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = db.get('x1/')['xpub']
xpub2 = db.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
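# Worked sketch (hedged, illustrative only) of how the utilities above fit
# together: the wallet's long user id selects a per-user branch of the public
# billing xpub via make_xpub(), and successive billing addresses are then
# derived from that branch by index.  Any object exposing get_user_id() the
# way Wallet_2fa does will work here.
def _example_billing_addresses(wallet, count=3):
    addresses = []
    for index in range(count):
        # make_billing_address() calls wallet.get_user_id() and make_xpub()
        # internally, then encodes the child pubkey as a segwit address.
        addresses.append(make_billing_address(wallet, index, addr_type='segwit'))
    return addresses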
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
assert tx
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure) -> None:
raise NotImplementedError()
@hook
def get_tx_extra_fee(self, wallet, tx: Transaction):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
        t.daemon = True  # setDaemon() is deprecated; mark as daemon before start()
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
seed_type = '2fa' if self.config.get('nosegwit') else '2fa_segwit'
self.create_seed(wizard, seed_type)
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.opt_bip39 = False
wizard.opt_ext = True
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
if t == '2fa':
if n >= 20: # old scheme
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif n == 12: # new scheme
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unrecognized seed length for "2fa" seed: {n}')
elif t == '2fa_segwit':
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unexpected seed type: {t}')
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_slip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, seed_type, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate(aborted=True)
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_slip39 = False
wizard.opt_ext = True
f = lambda seed, seed_type, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate(aborted=True)
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate(aborted=True)
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, db):
if db.get('wallet_type') != '2fa':
return
if not db.get('x1/'):
return self, 'show_disclaimer'
if not db.get('x2/'):
return self, 'show_disclaimer'
if not db.get('x3/'):
return self, 'accept_terms_of_use'
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import contextlib
import eventlet
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
matchmaker_opts = [
cfg.IntOpt('matchmaker_heartbeat_freq',
default=300,
help='Heartbeat frequency'),
cfg.IntOpt('matchmaker_heartbeat_ttl',
default=600,
help='Heartbeat time-to-live.'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager
class MatchMakerException(Exception):
"""Signified a match could not be found."""
message = _("Match not found by MatchMaker.")
class Exchange(object):
"""Implements lookups.
Subclass this to support hashtables, dns, etc.
"""
def __init__(self):
pass
def run(self, key):
raise NotImplementedError()
class Binding(object):
"""A binding on which to perform a lookup."""
def __init__(self):
pass
def test(self, key):
raise NotImplementedError()
class MatchMakerBase(object):
"""Match Maker Base Class.
Build off HeartbeatMatchMakerBase if building a heartbeat-capable
MatchMaker.
"""
def __init__(self):
# Array of tuples. Index [2] toggles negation, [3] is last-if-true
self.bindings = []
self.no_heartbeat_msg = _('Matchmaker does not implement '
'registration or heartbeat.')
def register(self, key, host):
"""Register a host on a backend.
Heartbeats, if applicable, may keepalive registration.
"""
pass
def ack_alive(self, key, host):
"""Acknowledge that a key.host is alive.
Used internally for updating heartbeats, but may also be used
        publicly to acknowledge that a system is alive (i.e. rpc message
successfully sent to host)
"""
pass
def is_alive(self, topic, host):
"""Checks if a host is alive."""
pass
def expire(self, topic, host):
"""Explicitly expire a host's registration."""
pass
def send_heartbeats(self):
"""Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method.
"""
pass
def unregister(self, key, host):
"""Unregister a topic."""
pass
def start_heartbeat(self):
"""Spawn heartbeat greenthread."""
pass
def stop_heartbeat(self):
"""Destroys the heartbeat greenthread."""
pass
def add_binding(self, binding, rule, last=True):
self.bindings.append((binding, rule, False, last))
#NOTE(ewindisch): kept the following method in case we implement the
# underlying support.
#def add_negate_binding(self, binding, rule, last=True):
# self.bindings.append((binding, rule, True, last))
def queues(self, key):
workers = []
# bit is for negate bindings - if we choose to implement it.
# last stops processing rules if this matches.
for (binding, exchange, bit, last) in self.bindings:
if binding.test(key):
workers.extend(exchange.run(key))
# Support last.
if last:
return workers
return workers
class HeartbeatMatchMakerBase(MatchMakerBase):
"""Base for a heart-beat capable MatchMaker.
Provides common methods for registering, unregistering, and maintaining
heartbeats.
"""
def __init__(self):
self.hosts = set()
self._heart = None
self.host_topic = {}
super(HeartbeatMatchMakerBase, self).__init__()
def send_heartbeats(self):
"""Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method.
"""
for key, host in self.host_topic:
self.ack_alive(key, host)
def ack_alive(self, key, host):
"""Acknowledge that a host.topic is alive.
Used internally for updating heartbeats, but may also be used
        publicly to acknowledge that a system is alive (i.e. rpc message
successfully sent to host)
"""
raise NotImplementedError("Must implement ack_alive")
def backend_register(self, key, host):
"""Implements registration logic.
Called by register(self,key,host)
"""
raise NotImplementedError("Must implement backend_register")
def backend_unregister(self, key, key_host):
"""Implements de-registration logic.
Called by unregister(self,key,host)
"""
raise NotImplementedError("Must implement backend_unregister")
def register(self, key, host):
"""Register a host on a backend.
Heartbeats, if applicable, may keepalive registration.
"""
self.hosts.add(host)
self.host_topic[(key, host)] = host
key_host = '.'.join((key, host))
self.backend_register(key, key_host)
self.ack_alive(key, host)
def unregister(self, key, host):
"""Unregister a topic."""
if (key, host) in self.host_topic:
del self.host_topic[(key, host)]
self.hosts.discard(host)
self.backend_unregister(key, '.'.join((key, host)))
LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
{'key': key, 'host': host})
def start_heartbeat(self):
"""Implementation of MatchMakerBase.start_heartbeat.
Launches greenthread looping send_heartbeats(),
yielding for CONF.matchmaker_heartbeat_freq seconds
between iterations.
"""
if not self.hosts:
raise MatchMakerException(
_("Register before starting heartbeat."))
def do_heartbeat():
while True:
self.send_heartbeats()
eventlet.sleep(CONF.matchmaker_heartbeat_freq)
self._heart = eventlet.spawn(do_heartbeat)
def stop_heartbeat(self):
"""Destroys the heartbeat greenthread."""
if self._heart:
self._heart.kill()
class DirectBinding(Binding):
"""Specifies a host in the key via a '.' character.
Although dots are used in the key, the behavior here is
that it maps directly to a host, thus direct.
"""
def test(self, key):
if '.' in key:
return True
return False
class TopicBinding(Binding):
"""Where a 'bare' key without dots.
AMQP generally considers topic exchanges to be those *with* dots,
but we deviate here in terminology as the behavior here matches
that of a topic exchange (whereas where there are dots, behavior
matches that of a direct exchange.
"""
def test(self, key):
if '.' not in key:
return True
return False
class FanoutBinding(Binding):
"""Match on fanout keys, where key starts with 'fanout.' string."""
def test(self, key):
if key.startswith('fanout~'):
return True
return False
class StubExchange(Exchange):
"""Exchange that does nothing."""
def run(self, key):
return [(key, None)]
class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local."""
def __init__(self, host='localhost'):
self.host = host
        super(LocalhostExchange, self).__init__()
def run(self, key):
return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
"""Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute.host" running on "host"
"""
def __init__(self):
        super(DirectExchange, self).__init__()
def run(self, key):
e = key.split('.', 1)[1]
return [(key, e)]
class MatchMakerLocalhost(MatchMakerBase):
"""Match Maker where all bare topics resolve to localhost.
Useful for testing.
"""
def __init__(self, host='localhost'):
super(MatchMakerLocalhost, self).__init__()
self.add_binding(FanoutBinding(), LocalhostExchange(host))
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), LocalhostExchange(host))
class MatchMakerStub(MatchMakerBase):
"""Match Maker where topics are untouched.
Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is required (i.e. zeromq).
"""
def __init__(self):
super(MatchMakerStub, self).__init__()
self.add_binding(FanoutBinding(), StubExchange())
self.add_binding(DirectBinding(), StubExchange())
self.add_binding(TopicBinding(), StubExchange())
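# Usage sketch (hedged, illustrative only): how the binding/exchange pairs
# registered above resolve a routing key.  MatchMakerLocalhost maps bare topic
# keys and fanout keys to the local host, while dotted keys are treated as
# direct bindings and split into (key, host).
def _example_localhost_lookup():
    mm = MatchMakerLocalhost(host='localhost')
    # A bare topic key matches TopicBinding -> LocalhostExchange.
    assert mm.queues('compute') == [('compute.localhost', 'localhost')]
    # A dotted key matches DirectBinding -> DirectExchange.
    assert mm.queues('compute.node1') == [('compute.node1', 'node1')]
    return mm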
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
import json
import os
import shutil
import sys
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from libsolr.api import SolrApi
from libzookeeper.models import ZookeeperClient
from indexer.conf import CORE_INSTANCE_DIR, get_solr_ensemble
from indexer.utils import copy_configs
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
MAX_UPLOAD_SIZE = 100 * 1024 * 1024 # 100 MB
ALLOWED_FIELD_ATTRIBUTES = set(['name', 'type', 'indexed', 'stored'])
FLAGS = [('I', 'indexed'), ('T', 'tokenized'), ('S', 'stored'), ('M', 'multivalued')]
ZK_SOLR_CONFIG_NAMESPACE = 'configs'
_IS_SOLR_CLOUD = None
_IS_SOLR_6_OR_MORE = None
_IS_SOLR_WITH_HDFS = None
_ZOOKEEPER_HOST = None
_IS_SENTRY_PROTECTED = None
class SolrClientException(Exception):
pass
class SolrClient(object):
def __init__(self, user, api=None):
self.user = user
self.api = api if api is not None else SolrApi(user=self.user)
def get_indexes(self, include_cores=False):
indexes = []
try:
if self.is_solr_cloud_mode():
collections = self.api.collections2()
for name in collections:
indexes.append({'name': name, 'type': 'collection', 'collections': []})
if self.is_solr_cloud_mode():
try:
if self.is_solr_six_or_more():
solr_aliases = self.api.list_aliases()
else:
solr_aliases = self.api.aliases()
for name in solr_aliases:
collections = solr_aliases[name].split()
indexes.append({'name': name, 'type': 'alias', 'collections': collections})
except Exception:
LOG.exception('Aliases could not be retrieved')
if not self.is_solr_cloud_mode() or include_cores:
solr_cores = self.api.cores()
for name in solr_cores:
indexes.append({'name': name, 'type': 'core', 'collections': []})
except Exception as e:
msg = _('Solr server could not be contacted properly: %s') % e
LOG.warning(msg)
raise PopupException(msg, detail=smart_str(e))
return sorted(indexes, key=lambda index: index['name'])
def create_index(self, name, fields, config_name=None, unique_key_field=None, df=None, shards=1, replication=1):
if self.is_solr_cloud_mode():
if self.is_solr_six_or_more():
config_sets = self.list_configs()
if not config_sets:
raise PopupException(_('Solr does not have any predefined (secure: %s) configSets: %s') % (self.is_sentry_protected(), self.list_configs()))
if not config_name or config_name not in config_sets:
config_name_target = 'managedTemplate'
if config_name_target in config_sets:
config_name = config_name_target
elif '_default' in config_sets:
config_name = '_default'
else:
config_name = config_sets[0]
# Note: uniqueKey is always 'id'
self.api.create_config(name, config_name, immutable=False)
self.api.create_collection2(name, config_name=name, shards=shards, replication=replication)
fields = [{
'name': field['name'],
'type': SolrClient._port_field_types(field)['type'],
'stored': field.get('stored', True),
'multiValued': field.get('multiValued', False)
} for field in fields if field['name'] != 'id'
]
self.api.add_fields(name, fields)
if df:
self.api.update_config(name, {
'update-requesthandler': {
"name": "/select",
"class": "solr.SearchHandler",
"defaults": {"df": df},
}
})
if self.is_solr_six_or_more():
self.api.update_config(name, {
'add-updateprocessor': {
"name" : "tolerant",
"class": "solr.TolerantUpdateProcessorFactory",
"maxErrors": "100"
}
})
else:
self._create_cloud_config(name, fields, unique_key_field, df)
self.api.create_collection2(name, config_name=config_name, shards=shards, replication=replication)
else:
self._create_non_solr_cloud_index(name, fields, unique_key_field, df)
def create_alias(self, name, collections):
return self.api.create_alias(name, collections)
def index(self, name, data, content_type='csv', version=None, **kwargs):
"""
e.g. Parameters: separator = ',', fieldnames = 'a,b,c', header=true, skip 'a,b', encapsulator="
escape=\, map, split, overwrite=true, rowid=id
"""
return self.api.update(name, data, content_type=content_type, version=version, **kwargs)
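# For example (a hypothetical call, values are illustrative only):
#   client.index('logs_demo', 'id,msg\n1,hello\n', content_type='csv',
#                separator=',', overwrite='true')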
def exists(self, name):
try:
self.api.get_schema(name)
return True
except Exception as e:
LOG.info('Checking if index %s exists failed: %s' % (name, e))
return False
def delete_index(self, name, keep_config=True):
if not self.is_solr_cloud_mode():
raise PopupException(_('Cannot remove non-Solr cloud cores.'))
result = self.api.delete_collection(name)
if result['status'] == 0:
# Delete instance directory.
if not keep_config:
if self.is_solr_six_or_more():
return self.api.delete_config(name)
else:
try:
root_node = '%s/%s' % (ZK_SOLR_CONFIG_NAMESPACE, name)
with ZookeeperClient(hosts=self.get_zookeeper_host(), read_only=False) as zc:
zc.delete_path(root_node)
except Exception as e:
# Re-create collection so that we don't have an orphan config
self.api.add_collection(name)
raise PopupException(_('Error in deleting Solr configurations.'), detail=e)
else:
if 'Cannot unload non-existent core' not in json.dumps(result):
raise PopupException(_('Could not remove collection: %(message)s') % result)
def sample_index(self, collection, rows=100):
return self.api.select(collection, rows=min(rows, 1000))
def get_config(self, collection):
return self.api.config(collection)
def list_configs(self):
return self.api.configs()
def list_schema(self, index_name):
return self.api.get_schema(index_name)
def delete_alias(self, name):
return self.api.delete_alias(name)
def update_config(self, name, properties):
return self.api.update_config(name, properties)
def is_solr_cloud_mode(self):
global _IS_SOLR_CLOUD
if _IS_SOLR_CLOUD is None:
self._fillup_properties()
return _IS_SOLR_CLOUD
def is_solr_six_or_more(self):
global _IS_SOLR_6_OR_MORE
if _IS_SOLR_6_OR_MORE is None:
self._fillup_properties()
return _IS_SOLR_6_OR_MORE
def is_solr_with_hdfs(self):
global _IS_SOLR_WITH_HDFS
if _IS_SOLR_WITH_HDFS is None:
self._fillup_properties()
return _IS_SOLR_WITH_HDFS
def is_sentry_protected(self):
global _IS_SENTRY_PROTECTED
if _IS_SENTRY_PROTECTED is None:
self._fillup_properties()
return _IS_SENTRY_PROTECTED
def get_zookeeper_host(self):
global _ZOOKEEPER_HOST
if _ZOOKEEPER_HOST is None:
self._fillup_properties()
return _ZOOKEEPER_HOST
# Deprecated
def _create_cloud_config(self, name, fields, unique_key_field, df):
with ZookeeperClient(hosts=self.get_zookeeper_host(), read_only=False) as zc:
tmp_path, solr_config_path = copy_configs(
fields=fields,
unique_key_field=unique_key_field,
df=df,
solr_cloud_mode=True,
is_solr_six_or_more=self.is_solr_six_or_more(),
is_solr_hdfs_mode=self.is_solr_with_hdfs(),
is_sentry_protected=self.is_sentry_protected()
)
try:
root_node = '%s/%s' % (ZK_SOLR_CONFIG_NAMESPACE, name)
config_root_path = '%s/%s' % (solr_config_path, 'conf')
if not zc.path_exists(root_node):
zc.copy_path(root_node, config_root_path)
else:
LOG.warning('Config %s already exists.' % name)
except Exception as e:
if zc.path_exists(root_node):
zc.delete_path(root_node)
raise PopupException(_('Could not create index: %s') % e)
finally:
shutil.rmtree(tmp_path)
# Deprecated
def _create_non_solr_cloud_index(self, name, fields, unique_key_field, df):
# Create instance directory locally.
instancedir = os.path.join(CORE_INSTANCE_DIR.get(), name)
if os.path.exists(instancedir):
raise PopupException(_("Instance directory %s already exists! Please remove it from the file system.") % instancedir)
try:
tmp_path, solr_config_path = copy_configs(fields, unique_key_field, df, False)
try:
shutil.move(solr_config_path, instancedir)
finally:
shutil.rmtree(tmp_path)
if not self.api.create_core(name, instancedir):
raise Exception('Failed to create core: %s' % name)
except Exception as e:
raise PopupException(_('Could not create index. Check error logs for more info.'), detail=e)
finally:
shutil.rmtree(instancedir)
def _fillup_properties(self):
global _IS_SOLR_CLOUD
global _IS_SOLR_6_OR_MORE
global _IS_SOLR_WITH_HDFS
global _ZOOKEEPER_HOST
global _IS_SENTRY_PROTECTED
properties = self.api.info_system()
_IS_SOLR_CLOUD = properties.get('mode', 'solrcloud') == 'solrcloud'
_IS_SOLR_6_OR_MORE = not str(properties.get('lucene', {}).get('solr-spec-version')).startswith('4.')
_IS_SOLR_WITH_HDFS = False
_ZOOKEEPER_HOST = properties.get('zkHost', get_solr_ensemble())
command_line_args = properties.get('jvm', {}).get('jmx', {}).get('commandLineArgs', [])
for command_line_arg in command_line_args:
if not _IS_SOLR_WITH_HDFS and 'solr.hdfs.home' in command_line_arg:
_IS_SOLR_WITH_HDFS = True
if '-DzkHost=' in command_line_arg:
_ZOOKEEPER_HOST = command_line_arg.split('-DzkHost=', 1)[1]
if '-Dsolr.authorization.sentry.site' in command_line_arg:
_IS_SENTRY_PROTECTED = True
@staticmethod
def _port_field_types(field):
if not field['type'].startswith('p'): # Check for automatically converting to new default Solr types
field['type'] = field['type'].replace('long', 'plong').replace('double', 'pdouble').replace('date', 'pdate')
return field
@staticmethod
def _reset_properties():
global _IS_SOLR_CLOUD
global _IS_SOLR_6_OR_MORE
global _IS_SOLR_WITH_HDFS
global _ZOOKEEPER_HOST
global _IS_SENTRY_PROTECTED
_IS_SOLR_CLOUD = _IS_SOLR_6_OR_MORE = _IS_SOLR_WITH_HDFS = _ZOOKEEPER_HOST = _IS_SENTRY_PROTECTED = None
# Used by morphline indexer
def get_index_schema(self, index_name):
try:
field_data = self.api.fields(index_name)
fields = self._format_flags(field_data['schema']['fields'])
uniquekey = self.api.uniquekey(index_name)
return uniquekey, fields
except Exception as e:
LOG.exception(smart_str(e))
raise SolrClientException(_("Error in getting schema information for index '%s'") % index_name)
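# A minimal sketch (hedged; assumes this module's imports resolve) of the
# legacy-to-point type porting performed by SolrClient._port_field_types():
# any type not already starting with 'p' is mapped to the newer point types.
if __name__ == '__main__':
    assert SolrClient._port_field_types({'name': 'ts', 'type': 'date'})['type'] == 'pdate'
    assert SolrClient._port_field_types({'name': 'n', 'type': 'long'})['type'] == 'plong'
    assert SolrClient._port_field_types({'name': 'x', 'type': 'pdouble'})['type'] == 'pdouble'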
|
|
# -*- coding: utf-8 -*-
import pytest
import sqlparse
from sqlparse import sql, tokens as T
def test_grouping_parenthesis():
s = 'select (select (x3) x2) and (y2) bar'
parsed = sqlparse.parse(s)[0]
assert str(parsed) == s
assert len(parsed.tokens) == 7
assert isinstance(parsed.tokens[2], sql.Parenthesis)
assert isinstance(parsed.tokens[-1], sql.Identifier)
assert len(parsed.tokens[2].tokens) == 5
assert isinstance(parsed.tokens[2].tokens[3], sql.Identifier)
assert isinstance(parsed.tokens[2].tokens[3].tokens[0], sql.Parenthesis)
assert len(parsed.tokens[2].tokens[3].tokens) == 3
def test_grouping_comments():
s = '/*\n * foo\n */ \n bar'
parsed = sqlparse.parse(s)[0]
assert str(parsed) == s
assert len(parsed.tokens) == 2
@pytest.mark.parametrize('s', ['foo := 1;', 'foo := 1'])
def test_grouping_assignment(s):
parsed = sqlparse.parse(s)[0]
assert len(parsed.tokens) == 1
assert isinstance(parsed.tokens[0], sql.Assignment)
def test_grouping_identifiers():
s = 'select foo.bar from "myscheme"."table" where fail. order'
parsed = sqlparse.parse(s)[0]
assert str(parsed) == s
assert isinstance(parsed.tokens[2], sql.Identifier)
assert isinstance(parsed.tokens[6], sql.Identifier)
assert isinstance(parsed.tokens[8], sql.Where)
s = 'select * from foo where foo.id = 1'
parsed = sqlparse.parse(s)[0]
assert str(parsed) == s
assert isinstance(parsed.tokens[-1].tokens[-1].tokens[0], sql.Identifier)
s = 'select * from (select "foo"."id" from foo)'
parsed = sqlparse.parse(s)[0]
assert str(parsed) == s
assert isinstance(parsed.tokens[-1].tokens[3], sql.Identifier)
s = "INSERT INTO `test` VALUES('foo', 'bar');"
parsed = sqlparse.parse(s)[0]
types = [l.ttype for l in parsed.tokens if not l.is_whitespace]
assert types == [T.DML, T.Keyword, None, T.Keyword, None, T.Punctuation]
s = "select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable"
parsed = sqlparse.parse(s)[0]
assert len(parsed.tokens) == 7
assert isinstance(parsed.tokens[2], sql.IdentifierList)
assert len(parsed.tokens[2].tokens) == 4
identifiers = list(parsed.tokens[2].get_identifiers())
assert len(identifiers) == 2
assert identifiers[0].get_alias() == "col"
def test_grouping_identifier_wildcard():
p = sqlparse.parse('a.*, b.id')[0]
assert isinstance(p.tokens[0], sql.IdentifierList)
assert isinstance(p.tokens[0].tokens[0], sql.Identifier)
assert isinstance(p.tokens[0].tokens[-1], sql.Identifier)
def test_grouping_identifier_name_wildcard():
p = sqlparse.parse('a.*')[0]
t = p.tokens[0]
assert t.get_name() == '*'
assert t.is_wildcard() is True
def test_grouping_identifier_invalid():
p = sqlparse.parse('a.')[0]
assert isinstance(p.tokens[0], sql.Identifier)
assert p.tokens[0].has_alias() is False
assert p.tokens[0].get_name() is None
assert p.tokens[0].get_real_name() is None
assert p.tokens[0].get_parent_name() == 'a'
def test_grouping_identifier_invalid_in_middle():
# issue261
s = 'SELECT foo. FROM foo'
p = sqlparse.parse(s)[0]
assert isinstance(p[2], sql.Identifier)
assert p[2][1].ttype == T.Punctuation
assert p[3].ttype == T.Whitespace
assert str(p[2]) == 'foo.'
def test_grouping_identifier_as_invalid():
# issue8
p = sqlparse.parse('foo as select *')[0]
assert len(p.tokens) == 5
assert isinstance(p.tokens[0], sql.Identifier)
assert len(p.tokens[0].tokens) == 1
assert p.tokens[2].ttype == T.Keyword
def test_grouping_identifier_function():
p = sqlparse.parse('foo() as bar')[0]
assert isinstance(p.tokens[0], sql.Identifier)
assert isinstance(p.tokens[0].tokens[0], sql.Function)
p = sqlparse.parse('foo()||col2 bar')[0]
assert isinstance(p.tokens[0], sql.Identifier)
assert isinstance(p.tokens[0].tokens[0], sql.Operation)
assert isinstance(p.tokens[0].tokens[0].tokens[0], sql.Function)
@pytest.mark.parametrize('s', ['foo+100', 'foo + 100', 'foo*100'])
def test_grouping_operation(s):
p = sqlparse.parse(s)[0]
assert isinstance(p.tokens[0], sql.Operation)
def test_grouping_identifier_list():
p = sqlparse.parse('a, b, c')[0]
assert isinstance(p.tokens[0], sql.IdentifierList)
p = sqlparse.parse('(a, b, c)')[0]
assert isinstance(p.tokens[0].tokens[1], sql.IdentifierList)
def test_grouping_identifier_list_subquery():
"""identifier lists should still work in subqueries with aliases"""
p = sqlparse.parse("select * from ("
"select a, b + c as d from table) sub")[0]
subquery = p.tokens[-1].tokens[0]
idx, iden_list = subquery.token_next_by(i=sql.IdentifierList)
assert iden_list is not None
# all the identifiers should be within the IdentifierList
_, ilist = subquery.token_next_by(i=sql.Identifier, idx=idx)
assert ilist is None
def test_grouping_identifier_list_case():
p = sqlparse.parse('a, case when 1 then 2 else 3 end as b, c')[0]
assert isinstance(p.tokens[0], sql.IdentifierList)
p = sqlparse.parse('(a, case when 1 then 2 else 3 end as b, c)')[0]
assert isinstance(p.tokens[0].tokens[1], sql.IdentifierList)
def test_grouping_identifier_list_other():
# issue2
p = sqlparse.parse("select *, null, 1, 'foo', bar from mytable, x")[0]
assert isinstance(p.tokens[2], sql.IdentifierList)
assert len(p.tokens[2].tokens) == 13
def test_grouping_identifier_list_with_inline_comments():
# issue163
p = sqlparse.parse('foo /* a comment */, bar')[0]
assert isinstance(p.tokens[0], sql.IdentifierList)
assert isinstance(p.tokens[0].tokens[0], sql.Identifier)
assert isinstance(p.tokens[0].tokens[3], sql.Identifier)
def test_grouping_identifiers_with_operators():
p = sqlparse.parse('a+b as c from table where (d-e)%2= 1')[0]
assert len([x for x in p.flatten() if x.ttype == T.Name]) == 5
def test_grouping_identifier_list_with_order():
# issue101
p = sqlparse.parse('1, 2 desc, 3')[0]
assert isinstance(p.tokens[0], sql.IdentifierList)
assert isinstance(p.tokens[0].tokens[3], sql.Identifier)
assert str(p.tokens[0].tokens[3]) == '2 desc'
def test_grouping_where():
s = 'select * from foo where bar = 1 order by id desc'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert len(p.tokens) == 14
s = 'select x from (select y from foo where bar = 1) z'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert isinstance(p.tokens[-1].tokens[0].tokens[-2], sql.Where)
def test_returning_kw_ends_where_clause():
s = 'delete from foo where x > y returning z'
p = sqlparse.parse(s)[0]
assert isinstance(p.tokens[6], sql.Where)
assert p.tokens[7].ttype == T.Keyword
assert p.tokens[7].value == 'returning'
def test_grouping_typecast():
s = 'select foo::integer from bar'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert p.tokens[2].get_typecast() == 'integer'
assert p.tokens[2].get_name() == 'foo'
s = 'select (current_database())::information_schema.sql_identifier'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert (p.tokens[2].get_typecast() == 'information_schema.sql_identifier')
def test_grouping_alias():
s = 'select foo as bar from mytable'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert p.tokens[2].get_real_name() == 'foo'
assert p.tokens[2].get_alias() == 'bar'
s = 'select foo from mytable t1'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert p.tokens[6].get_real_name() == 'mytable'
assert p.tokens[6].get_alias() == 't1'
s = 'select foo::integer as bar from mytable'
p = sqlparse.parse(s)[0]
assert str(p) == s
assert p.tokens[2].get_alias() == 'bar'
s = ('SELECT DISTINCT '
'(current_database())::information_schema.sql_identifier AS view')
p = sqlparse.parse(s)[0]
assert str(p) == s
assert p.tokens[4].get_alias() == 'view'
def test_grouping_alias_case():
# see issue46
p = sqlparse.parse('CASE WHEN 1 THEN 2 ELSE 3 END foo')[0]
assert len(p.tokens) == 1
assert p.tokens[0].get_alias() == 'foo'
def test_grouping_alias_returns_none():
# see issue185
p = sqlparse.parse('foo.bar')[0]
assert len(p.tokens) == 1
assert p.tokens[0].get_alias() is None
def test_grouping_idlist_function():
# see issue10 too
p = sqlparse.parse('foo(1) x, bar')[0]
assert isinstance(p.tokens[0], sql.IdentifierList)
def test_grouping_comparison_exclude():
# make sure operators are not handled too lazy
p = sqlparse.parse('(=)')[0]
assert isinstance(p.tokens[0], sql.Parenthesis)
assert not isinstance(p.tokens[0].tokens[1], sql.Comparison)
p = sqlparse.parse('(a=1)')[0]
assert isinstance(p.tokens[0].tokens[1], sql.Comparison)
p = sqlparse.parse('(a>=1)')[0]
assert isinstance(p.tokens[0].tokens[1], sql.Comparison)
def test_grouping_function():
p = sqlparse.parse('foo()')[0]
assert isinstance(p.tokens[0], sql.Function)
p = sqlparse.parse('foo(null, bar)')[0]
assert isinstance(p.tokens[0], sql.Function)
assert len(list(p.tokens[0].get_parameters())) == 2
def test_grouping_function_not_in():
# issue183
p = sqlparse.parse('in(1, 2)')[0]
assert len(p.tokens) == 2
assert p.tokens[0].ttype == T.Keyword
assert isinstance(p.tokens[1], sql.Parenthesis)
def test_grouping_varchar():
p = sqlparse.parse('"text" Varchar(50) NOT NULL')[0]
assert isinstance(p.tokens[2], sql.Function)
def test_statement_get_type():
def f(sql):
return sqlparse.parse(sql)[0]
assert f('select * from foo').get_type() == 'SELECT'
assert f('update foo').get_type() == 'UPDATE'
assert f(' update foo').get_type() == 'UPDATE'
assert f('\nupdate foo').get_type() == 'UPDATE'
assert f('foo').get_type() == 'UNKNOWN'
# Statements that have whitespace after the closing semicolon
# are parsed as two statements, where the latter consists only of the
# trailing whitespace.
assert f('\n').get_type() == 'UNKNOWN'
def test_identifier_with_operators():
# issue 53
p = sqlparse.parse('foo||bar')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Operation)
# again with whitespaces
p = sqlparse.parse('foo || bar')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Operation)
def test_identifier_with_op_trailing_ws():
# make sure trailing whitespace isn't grouped with identifier
p = sqlparse.parse('foo || bar ')[0]
assert len(p.tokens) == 2
assert isinstance(p.tokens[0], sql.Operation)
assert p.tokens[1].ttype is T.Whitespace
def test_identifier_with_string_literals():
p = sqlparse.parse("foo + 'bar'")[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Operation)
# This test seems to be wrong. It was introduced when fixing #53, but #111
# showed that this shouldn't be an identifier at all. I'm leaving this
# commented in the source for a while.
# def test_identifier_string_concat():
# p = sqlparse.parse("'foo' || bar")[0]
# assert len(p.tokens) == 1
# assert isinstance(p.tokens[0], sql.Identifier)
def test_identifier_consumes_ordering():
# issue89
p = sqlparse.parse('select * from foo order by c1 desc, c2, c3')[0]
assert isinstance(p.tokens[-1], sql.IdentifierList)
ids = list(p.tokens[-1].get_identifiers())
assert len(ids) == 3
assert ids[0].get_name() == 'c1'
assert ids[0].get_ordering() == 'DESC'
assert ids[1].get_name() == 'c2'
assert ids[1].get_ordering() is None
def test_comparison_with_keywords():
# issue90
# in fact these are assignments, but for now we don't distinguish them
p = sqlparse.parse('foo = NULL')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
assert len(p.tokens[0].tokens) == 5
assert p.tokens[0].left.value == 'foo'
assert p.tokens[0].right.value == 'NULL'
# make sure it's case-insensitive
p = sqlparse.parse('foo = null')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
def test_comparison_with_floats():
# issue145
p = sqlparse.parse('foo = 25.5')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
assert len(p.tokens[0].tokens) == 5
assert p.tokens[0].left.value == 'foo'
assert p.tokens[0].right.value == '25.5'
def test_comparison_with_parenthesis():
# issue23
p = sqlparse.parse('(3 + 4) = 7')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
comp = p.tokens[0]
assert isinstance(comp.left, sql.Parenthesis)
assert comp.right.ttype is T.Number.Integer
def test_comparison_with_strings():
# issue148
p = sqlparse.parse("foo = 'bar'")[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
assert p.tokens[0].right.value == "'bar'"
assert p.tokens[0].right.ttype == T.String.Single
def test_comparison_with_functions():
# issue230
p = sqlparse.parse('foo = DATE(bar.baz)')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
assert len(p.tokens[0].tokens) == 5
assert p.tokens[0].left.value == 'foo'
assert p.tokens[0].right.value == 'DATE(bar.baz)'
p = sqlparse.parse('DATE(foo.bar) = DATE(bar.baz)')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
assert len(p.tokens[0].tokens) == 5
assert p.tokens[0].left.value == 'DATE(foo.bar)'
assert p.tokens[0].right.value == 'DATE(bar.baz)'
p = sqlparse.parse('DATE(foo.bar) = bar.baz')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Comparison)
assert len(p.tokens[0].tokens) == 5
assert p.tokens[0].left.value == 'DATE(foo.bar)'
assert p.tokens[0].right.value == 'bar.baz'
@pytest.mark.parametrize('start', ['FOR', 'FOREACH'])
def test_forloops(start):
p = sqlparse.parse('{0} foo in bar LOOP foobar END LOOP'.format(start))[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.For)
def test_nested_for():
p = sqlparse.parse('FOR foo LOOP FOR bar LOOP END LOOP END LOOP')[0]
assert len(p.tokens) == 1
for1 = p.tokens[0]
assert for1.tokens[0].value == 'FOR'
assert for1.tokens[-1].value == 'END LOOP'
for2 = for1.tokens[6]
assert isinstance(for2, sql.For)
assert for2.tokens[0].value == 'FOR'
assert for2.tokens[-1].value == 'END LOOP'
def test_begin():
p = sqlparse.parse('BEGIN foo END')[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Begin)
def test_keyword_followed_by_parenthesis():
p = sqlparse.parse('USING(somecol')[0]
assert len(p.tokens) == 3
assert p.tokens[0].ttype == T.Keyword
assert p.tokens[1].ttype == T.Punctuation
def test_nested_begin():
p = sqlparse.parse('BEGIN foo BEGIN bar END END')[0]
assert len(p.tokens) == 1
outer = p.tokens[0]
assert outer.tokens[0].value == 'BEGIN'
assert outer.tokens[-1].value == 'END'
inner = outer.tokens[4]
assert inner.tokens[0].value == 'BEGIN'
assert inner.tokens[-1].value == 'END'
assert isinstance(inner, sql.Begin)
def test_aliased_column_without_as():
p = sqlparse.parse('foo bar')[0].tokens
assert len(p) == 1
assert p[0].get_real_name() == 'foo'
assert p[0].get_alias() == 'bar'
p = sqlparse.parse('foo.bar baz')[0].tokens[0]
assert p.get_parent_name() == 'foo'
assert p.get_real_name() == 'bar'
assert p.get_alias() == 'baz'
def test_qualified_function():
p = sqlparse.parse('foo()')[0].tokens[0]
assert p.get_parent_name() is None
assert p.get_real_name() == 'foo'
p = sqlparse.parse('foo.bar()')[0].tokens[0]
assert p.get_parent_name() == 'foo'
assert p.get_real_name() == 'bar'
def test_aliased_function_without_as():
p = sqlparse.parse('foo() bar')[0].tokens[0]
assert p.get_parent_name() is None
assert p.get_real_name() == 'foo'
assert p.get_alias() == 'bar'
p = sqlparse.parse('foo.bar() baz')[0].tokens[0]
assert p.get_parent_name() == 'foo'
assert p.get_real_name() == 'bar'
assert p.get_alias() == 'baz'
def test_aliased_literal_without_as():
p = sqlparse.parse('1 foo')[0].tokens
assert len(p) == 1
assert p[0].get_alias() == 'foo'
|
|
#!/usr/bin/env python
'''
brozzler/cli.py - brozzler command line executables
Copyright (C) 2014-2019 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import brozzler
import brozzler.worker
import datetime
import json
import logging
import os
import re
import requests
import doublethink
import signal
import string
import sys
import threading
import time
import traceback
import warnings
import yaml
import shutil
import base64
import rethinkdb as r
def add_common_options(arg_parser, argv=None):
argv = argv or sys.argv
arg_parser.add_argument(
'-q', '--quiet', dest='log_level', action='store_const',
default=logging.INFO, const=logging.NOTICE, help='quiet logging')
arg_parser.add_argument(
'-v', '--verbose', dest='log_level', action='store_const',
default=logging.INFO, const=logging.DEBUG, help=(
'verbose logging'))
arg_parser.add_argument(
'--trace', dest='log_level', action='store_const',
default=logging.INFO, const=logging.TRACE, help=(
'very verbose logging'))
# arg_parser.add_argument(
# '-s', '--silent', dest='log_level', action='store_const',
# default=logging.INFO, const=logging.CRITICAL)
arg_parser.add_argument(
'--version', action='version',
version='brozzler %s - %s' % (
brozzler.__version__, os.path.basename(argv[0])))
def add_rethinkdb_options(arg_parser):
arg_parser.add_argument(
'--rethinkdb-servers', dest='rethinkdb_servers',
default=os.environ.get('BROZZLER_RETHINKDB_SERVERS', 'localhost'),
help=(
'rethinkdb servers, e.g. '
'db0.foo.org,db0.foo.org:38015,db1.foo.org (default is the '
'value of environment variable BROZZLER_RETHINKDB_SERVERS)'))
arg_parser.add_argument(
'--rethinkdb-db', dest='rethinkdb_db',
default=os.environ.get('BROZZLER_RETHINKDB_DB', 'brozzler'),
help=(
'rethinkdb database name (default is the value of environment '
'variable BROZZLER_RETHINKDB_DB)'))
def rethinker(args):
servers = args.rethinkdb_servers or 'localhost'
db = args.rethinkdb_db or os.environ.get(
'BROZZLER_RETHINKDB_DB') or 'brozzler'
return doublethink.Rethinker(servers.split(','), db)
def configure_logging(args):
logging.basicConfig(
stream=sys.stderr, level=args.log_level, format=(
'%(asctime)s %(process)d %(levelname)s %(threadName)s '
'%(name)s.%(funcName)s(%(filename)s:%(lineno)d) %(message)s'))
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARN)
warnings.simplefilter(
'ignore', category=requests.packages.urllib3.exceptions.InsecureRequestWarning)
warnings.simplefilter(
'ignore', category=requests.packages.urllib3.exceptions.InsecurePlatformWarning)
def suggest_default_chrome_exe():
# mac os x application executable paths
for path in [
'/Applications/Chromium.app/Contents/MacOS/Chromium',
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome']:
if os.path.exists(path):
return path
# "chromium-browser" is the executable on ubuntu trusty
# https://github.com/internetarchive/brozzler/pull/6/files uses "chromium"
# google chrome executable names taken from these packages:
# http://www.ubuntuupdates.org/ppa/google_chrome
for exe in [
'chromium-browser', 'chromium', 'google-chrome',
'google-chrome-stable', 'google-chrome-beta',
'google-chrome-unstable']:
if shutil.which(exe):
return exe
return 'chromium-browser'
class BetterArgumentDefaultsHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter):
'''
Like argparse.ArgumentDefaultsHelpFormatter but omits the default value
for arguments with action='store_const'.
'''
def _get_help_string(self, action):
if isinstance(action, argparse._StoreConstAction):
return action.help
else:
return super()._get_help_string(action)
def brozzle_page(argv=None):
'''
Command line utility entry point for brozzling a single page. Opens url in
a browser, running some javascript behaviors, and prints outlinks.
'''
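# Example invocation (hypothetical URL and proxy, for illustration only):
#   brozzle-page --proxy localhost:8000 --screenshot-full-page https://example.com/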
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzle-page - brozzle a single page',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument('url', metavar='URL', help='page url')
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'--behavior-parameters', dest='behavior_parameters',
default=None, help=(
'json blob of parameters to populate the javascript behavior '
'template, e.g. {"parameter_username":"x",'
'"parameter_password":"y"}'))
arg_parser.add_argument(
'--username', dest='username', default=None,
help='use this username to try to log in if a login form is found')
arg_parser.add_argument(
'--password', dest='password', default=None,
help='use this password to try to log in if a login form is found')
arg_parser.add_argument(
'--proxy', dest='proxy', default=None, help='http proxy')
arg_parser.add_argument(
'--browser_throughput', type=int, dest='download_throughput', default=-1,
help='Chrome DevTools downloadThroughput for Network.emulateNetworkConditions')
arg_parser.add_argument(
'--screenshot-full-page', dest='screenshot_full_page',
action='store_true')
arg_parser.add_argument(
'--skip-extract-outlinks', dest='skip_extract_outlinks',
action='store_true')
arg_parser.add_argument(
'--skip-visit-hashtags', dest='skip_visit_hashtags',
action='store_true')
arg_parser.add_argument(
'--skip-youtube-dl', dest='skip_youtube_dl', action='store_true')
arg_parser.add_argument(
'--simpler404', dest='simpler404', action='store_true')
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
brozzler.chrome.check_version(args.chrome_exe)
behavior_parameters = {}
if args.behavior_parameters:
behavior_parameters = json.loads(args.behavior_parameters)
site = brozzler.Site(None, {
'id': -1, 'seed': args.url, 'behavior_parameters': behavior_parameters,
'username': args.username, 'password': args.password})
page = brozzler.Page(None, {'url': args.url, 'site_id': site.id})
worker = brozzler.BrozzlerWorker(
frontier=None, proxy=args.proxy,
skip_extract_outlinks=args.skip_extract_outlinks,
skip_visit_hashtags=args.skip_visit_hashtags,
skip_youtube_dl=args.skip_youtube_dl,
simpler404=args.simpler404,
screenshot_full_page=args.screenshot_full_page,
download_throughput=args.download_throughput)
def on_screenshot(screenshot_jpeg):
OK_CHARS = string.ascii_letters + string.digits
filename = '/tmp/{}-{:%Y%m%d%H%M%S}.jpg'.format(
''.join(ch if ch in OK_CHARS else '_' for ch in args.url),
datetime.datetime.now())
with open(filename, 'wb') as f:
f.write(screenshot_jpeg)
logging.info('wrote screenshot to %s', filename)
browser = brozzler.Browser(chrome_exe=args.chrome_exe)
try:
browser.start(proxy=args.proxy)
outlinks = worker.brozzle_page(
browser, site, page, on_screenshot=on_screenshot,
enable_youtube_dl=not args.skip_youtube_dl)
logging.info('outlinks: \n\t%s', '\n\t'.join(sorted(outlinks)))
except brozzler.ReachedLimit as e:
logging.error('reached limit %s', e)
except brozzler.PageInterstitialShown as e:
logging.error('page interstitial shown %s', e)
finally:
browser.stop()
def brozzler_new_job(argv=None):
'''
Command line utility entry point for queuing a new brozzler job. Takes a
yaml brozzler job configuration file, creates job, sites, and pages objects
in rethinkdb, which brozzler-workers will look at and start crawling.
'''
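# A hypothetical minimal job configuration file (YAML), for illustration only:
#   id: myjob
#   seeds:
#     - url: https://example.com/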
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-new-job - queue new job with brozzler',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'job_conf_file', metavar='JOB_CONF_FILE',
help='brozzler job configuration file in yaml')
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
try:
brozzler.new_job_file(frontier, args.job_conf_file)
except brozzler.InvalidJobConf as e:
print('brozzler-new-job: invalid job file:', args.job_conf_file, file=sys.stderr)
print(' ' + yaml.dump(e.errors).rstrip().replace('\n', '\n '), file=sys.stderr)
sys.exit(1)
def brozzler_new_site(argv=None):
'''
Command line utility entry point for queuing a new brozzler site.
Takes a seed url and creates a site and page object in rethinkdb, which
brozzler-workers will look at and start crawling.
'''
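# Example invocation (hypothetical values, for illustration only):
#   brozzler-new-site --time-limit 3600 --ignore-robots https://example.com/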
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-new-site - register site to brozzle',
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument('seed', metavar='SEED', help='seed url')
add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'--time-limit', dest='time_limit', default=None,
help='time limit in seconds for this site')
arg_parser.add_argument(
'--ignore-robots', dest='ignore_robots', action='store_true',
help='ignore robots.txt for this site')
arg_parser.add_argument(
'--warcprox-meta', dest='warcprox_meta',
help=(
'Warcprox-Meta http request header to send with each request; '
'must be a json blob, ignored unless warcprox features are '
'enabled'))
arg_parser.add_argument(
'--behavior-parameters', dest='behavior_parameters',
default=None, help=(
'json blob of parameters to populate the javascript behavior '
'template, e.g. {"parameter_username":"x",'
'"parameter_password":"y"}'))
arg_parser.add_argument(
'--username', dest='username', default=None,
help='use this username to try to log in if a login form is found')
arg_parser.add_argument(
'--password', dest='password', default=None,
help='use this password to try to log in if a login form is found')
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
site = brozzler.Site(rr, {
'seed': args.seed,
'time_limit': int(args.time_limit) if args.time_limit else None,
'ignore_robots': args.ignore_robots,
'warcprox_meta': json.loads(
args.warcprox_meta) if args.warcprox_meta else None,
'behavior_parameters': json.loads(
args.behavior_parameters) if args.behavior_parameters else None,
'username': args.username,
'password': args.password})
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
def brozzler_worker(argv=None):
'''
Main entry point for brozzler, gets sites and pages to brozzle from
rethinkdb, brozzles them.
'''
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'-n', '--max-browsers', dest='max_browsers', default='1',
help='max number of chrome instances simultaneously browsing pages')
arg_parser.add_argument(
'--proxy', dest='proxy', default=None, help='http proxy')
arg_parser.add_argument(
'--browser_throughput', type=int, dest='download_throughput', default=-1,
help='Chrome DevTools downloadThroughput for Network.emulateNetworkConditions')
arg_parser.add_argument(
'--warcprox-auto', dest='warcprox_auto', action='store_true',
help=(
'when needed, choose an available instance of warcprox from '
'the rethinkdb service registry'))
arg_parser.add_argument(
'--skip-extract-outlinks', dest='skip_extract_outlinks',
action='store_true', help=argparse.SUPPRESS)
arg_parser.add_argument(
'--skip-visit-hashtags', dest='skip_visit_hashtags',
action='store_true', help=argparse.SUPPRESS)
arg_parser.add_argument(
'--skip-youtube-dl', dest='skip_youtube_dl',
action='store_true', help=argparse.SUPPRESS)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
brozzler.chrome.check_version(args.chrome_exe)
def dump_state(signum, frame):
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
try:
state_strs = []
frames = sys._current_frames()
threads = {th.ident: th for th in threading.enumerate()}
for ident in frames:
if threads[ident]:
state_strs.append(str(threads[ident]))
else:
state_strs.append('<???:thread:ident=%s>' % ident)
stack = traceback.format_stack(frames[ident])
state_strs.append(''.join(stack))
logging.info(
'dumping state (caught signal %s)\n%s' % (
signum, '\n'.join(state_strs)))
except BaseException as e:
logging.error('exception dumping state: %s' % e)
finally:
signal.signal(signal.SIGQUIT, dump_state)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
service_registry = doublethink.ServiceRegistry(rr)
worker = brozzler.worker.BrozzlerWorker(
frontier, service_registry, max_browsers=int(args.max_browsers),
chrome_exe=args.chrome_exe, proxy=args.proxy,
warcprox_auto=args.warcprox_auto,
skip_extract_outlinks=args.skip_extract_outlinks,
skip_visit_hashtags=args.skip_visit_hashtags,
skip_youtube_dl=args.skip_youtube_dl)
signal.signal(signal.SIGQUIT, dump_state)
signal.signal(signal.SIGTERM, lambda s,f: worker.stop())
signal.signal(signal.SIGINT, lambda s,f: worker.stop())
th = threading.Thread(target=worker.run, name='BrozzlerWorkerThread')
th.start()
th.join()
logging.info('brozzler-worker is all done, exiting')
def brozzler_ensure_tables(argv=None):
'''
Creates rethinkdb tables if they don't already exist. Brozzler
(brozzler-worker, brozzler-new-job, etc) normally creates the tables it
needs on demand at startup, but if multiple instances are starting up at
the same time, you can end up with duplicate broken tables. So it's a good
idea to use this utility at an early step when spinning up a cluster.
'''
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
# services table
doublethink.ServiceRegistry(rr)
# sites, pages, jobs tables
brozzler.frontier.RethinkDbFrontier(rr)
class Jsonner(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime.datetime):
return o.isoformat()
elif isinstance(o, bytes):
return base64.b64encode(o).decode('ascii')
else:
return json.JSONEncoder.default(self, o)
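# For example, Jsonner handles values the default encoder rejects:
#   json.dumps({'t': datetime.datetime(2019, 1, 1), 'b': b'\x00'}, cls=Jsonner)
#   -> '{"t": "2019-01-01T00:00:00", "b": "AA=="}'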
def brozzler_list_jobs(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--active', dest='active', action='store_true', help=(
'list active jobs'))
group.add_argument(
'--all', dest='all', action='store_true', help=(
'list all jobs'))
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'list only the specified job'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.job is not None:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
reql = rr.table('jobs').get(job_id)
logging.debug('querying rethinkdb: %s', reql)
result = reql.run()
if result:
results = [reql.run()]
else:
logging.error('no such job with id %r', job_id)
sys.exit(1)
else:
reql = rr.table('jobs').order_by('id')
if args.active:
reql = reql.filter({'status': 'ACTIVE'})
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_list_sites(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--active', dest='active', action='store_true', help=(
'list all active sites'))
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'list sites for a particular job'))
group.add_argument(
'--jobless', dest='jobless', action='store_true', help=(
'list all jobless sites'))
group.add_argument(
'--site', dest='site', metavar='SITE_ID', help=(
'list only the specified site'))
group.add_argument(
'--all', dest='all', action='store_true', help=(
'list all sites'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
reql = rr.table('sites')
if args.job:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
reql = reql.get_all(job_id, index='job_id')
elif args.jobless:
reql = reql.filter(~r.row.has_fields('job_id'))
elif args.active:
reql = reql.between(
['ACTIVE', r.minval], ['ACTIVE', r.maxval],
index='sites_last_disclaimed')
elif args.site:
reql = reql.get_all(args.site)
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_list_pages(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'list pages for all sites of a particular job'))
group.add_argument(
'--site', dest='site', metavar='SITE_ID', help=(
'list pages for the specified site'))
# group.add_argument(
# '--page', dest='page', metavar='PAGE_ID', help=(
# 'list only the specified page'))
group = arg_parser.add_mutually_exclusive_group()
group.add_argument(
'--queued', dest='queued', action='store_true', help=(
'limit to queued pages'))
group.add_argument(
'--brozzled', dest='brozzled', action='store_true', help=(
'limit to pages that have already been brozzled'))
group.add_argument(
'--claimed', dest='claimed', action='store_true', help=(
'limit to pages that are currently claimed by a brozzler '
'worker'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.job:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
reql = rr.table('sites').get_all(job_id, index='job_id')['id']
logging.debug('querying rethinkdb: %s', reql)
site_ids = reql.run()
elif args.site:
try:
site_ids = [int(args.site)]
except ValueError:
site_ids = [args.site]
for site_id in site_ids:
reql = rr.table('pages')
if args.queued:
reql = reql.between(
[site_id, 0, r.minval], [site_id, 0, r.maxval],
index='least_hops')
elif args.brozzled:
reql = reql.between(
[site_id, 1, r.minval], [site_id, r.maxval, r.maxval],
index='least_hops')
else:
reql = reql.between(
[site_id, 0, r.minval], [site_id, r.maxval, r.maxval],
index='least_hops')
reql = reql.order_by(index="least_hops")
if args.claimed:
reql = reql.filter({'claimed': True})
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_purge(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description='brozzler-purge - purge crawl state from rethinkdb',
formatter_class=BetterArgumentDefaultsHelpFormatter)
group = arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--job', dest='job', metavar='JOB_ID', help=(
'purge crawl state from rethinkdb for a job, including all '
'sites and pages'))
group.add_argument(
'--site', dest='site', metavar='SITE_ID', help=(
'purge crawl state from rethinkdb for a site, including all '
'pages'))
group.add_argument(
'--finished-before', dest='finished_before', metavar='YYYY-MM-DD',
help=('purge crawl state from rethinkdb for jobs that ended '
'before this date'))
arg_parser.add_argument(
'--force', dest='force', action='store_true', help=(
'purge even if the job or site still has status ACTIVE'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
if args.job:
try:
job_id = int(args.job)
except ValueError:
job_id = args.job
job = brozzler.Job.load(rr, job_id)
if not job:
logging.fatal('no such job %r', job_id)
sys.exit(1)
if job.status == 'ACTIVE':
if args.force:
logging.warning(
'job %s has status ACTIVE, purging anyway because '
'--force was supplied', job_id)
else:
logging.fatal(
'refusing to purge job %s because status is ACTIVE '
'(override with --force)', job_id)
sys.exit(1)
_purge_job(rr, job_id)
elif args.site:
site_id = args.site
site = brozzler.Site.load(rr, site_id)
if not site:
logging.fatal('no such site %r', site_id)
sys.exit(1)
if site.status == 'ACTIVE':
if args.force:
logging.warning(
'site %s has status ACTIVE, purging anyway because '
'--force was supplied', site_id)
else:
logging.fatal(
'refusing to purge site %s because status is ACTIVE '
'(override with --force)', site_id)
sys.exit(1)
_purge_site(rr, site_id)
elif args.finished_before:
finished_before = datetime.datetime.strptime(
args.finished_before, '%Y-%m-%d').replace(
tzinfo=doublethink.UTC)
reql = rr.table('jobs').filter(
r.row['finished'].default(r.maxval).lt(finished_before).or_(
r.row['starts_and_stops'].nth(-1)['stop'].default(r.maxval).lt(finished_before)))
logging.debug(
'retrieving jobs older than %s: %s', finished_before, reql)
for job in reql.run():
# logging.info('job %s finished=%s starts_and_stops[-1]["stop"]=%s',
# job['id'], job.get('finished'),
# job.get('starts_and_stops', [{'stop':None}])[-1]['stop'])
_purge_job(rr, job['id'])
def _purge_site(rr, site_id):
reql = rr.table('pages').between(
[site_id, r.minval, r.minval],
[site_id, r.maxval, r.maxval],
index='priority_by_site').delete()
logging.debug('purging pages for site %s: %s', site_id, reql)
result = reql.run()
logging.info('purged pages for site %s: %s', site_id, result)
reql = rr.table('sites').get(site_id).delete()
logging.debug('purging site %s: %s', site_id, reql)
result = reql.run()
logging.info('purged site %s: %s', site_id, result)
def _purge_job(rr, job_id):
reql = rr.table('sites').get_all(job_id, index='job_id').get_field('id')
logging.debug('querying rethinkdb: %s', reql)
site_ids = list(reql.run())
for site_id in site_ids:
_purge_site(rr, site_id)
reql = rr.table('jobs').get(job_id).delete()
logging.debug('purging job %s: %s', job_id, reql)
result = reql.run()
logging.info('purged job %s: %s', job_id, result)
def brozzler_list_captures(argv=None):
'''
Handy utility for looking up entries in the rethinkdb "captures" table by
url or sha1.
'''
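# Example lookups (hypothetical values, for illustration only):
#   brozzler-list-captures http://example.com/
#   brozzler-list-captures --prefix http://example.com/
#   brozzler-list-captures sha1:<base32-sha1-digest>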
import urlcanon
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'-p', '--prefix', dest='prefix', action='store_true', help=(
'use prefix match for url (n.b. may not work as expected if '
'the search key has a query string because canonicalization can '
'reorder query parameters)'))
arg_parser.add_argument(
'--yaml', dest='yaml', action='store_true', help=(
'yaml output (default is json)'))
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
arg_parser.add_argument(
'url_or_sha1', metavar='URL_or_SHA1',
help='url or sha1 to look up in captures table')
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.url_or_sha1[:5] == 'sha1:':
if args.prefix:
logging.warning(
'ignoring supplied --prefix option which does not apply '
'to lookup by sha1')
# assumes it's already base32 (XXX could detect if hex and convert)
sha1base32 = args.url_or_sha1[5:].upper()
reql = rr.table('captures').between(
[sha1base32, r.minval, r.minval],
[sha1base32, r.maxval, r.maxval],
index='sha1_warc_type')
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
else:
key = urlcanon.semantic(args.url_or_sha1).surt().decode('ascii')
abbr_start_key = key[:150]
if args.prefix:
# surt is necessarily ascii and \x7f is the last ascii character
abbr_end_key = key[:150] + '\x7f'
end_key = key + '\x7f'
else:
abbr_end_key = key[:150]
end_key = key
reql = rr.table('captures').between(
[abbr_start_key, r.minval],
[abbr_end_key, r.maxval],
index='abbr_canon_surt_timestamp', right_bound='closed')
reql = reql.order_by(index='abbr_canon_surt_timestamp')
reql = reql.filter(
lambda capture: (capture['canon_surt'] >= key)
& (capture['canon_surt'] <= end_key))
logging.debug('querying rethinkdb: %s', reql)
results = reql.run()
if args.yaml:
yaml.dump_all(
results, stream=sys.stdout, explicit_start=True,
default_flow_style=False)
else:
for result in results:
print(json.dumps(result, cls=Jsonner, indent=2))
def brozzler_stop_crawl(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
formatter_class=BetterArgumentDefaultsHelpFormatter)
group = arg_parser.add_mutually_exclusive_group(required=True)
add_rethinkdb_options(arg_parser)
group.add_argument(
'--job', dest='job_id', metavar='JOB_ID', help=(
'request crawl stop for the specified job'))
group.add_argument(
'--site', dest='site_id', metavar='SITE_ID', help=(
'request crawl stop for the specified site'))
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
if args.job_id:
try:
job_id = int(args.job_id)
except ValueError:
job_id = args.job_id
job = brozzler.Job.load(rr, job_id)
if not job:
logging.fatal('job not found with id=%r', job_id)
sys.exit(1)
job.stop_requested = doublethink.utcnow()
job.save()
elif args.site_id:
try:
site_id = int(args.site_id)
except ValueError:
site_id = args.site_id
site = brozzler.Site.load(rr, site_id)
if not site:
logging.fatal('site not found with id=%r', site_id)
sys.exit(1)
site.stop_requested = doublethink.utcnow()
site.save()
|
|
import numpy as np
import theano
import theano.tensor as T
floatX = theano.config.floatX
from keras.layers.recurrent import Recurrent, GRU, LSTM
from keras import backend as K
from seya.utils import rnn_states
tol = 1e-4
def _wta(X):
M = K.max(X, axis=-1, keepdims=True)
R = K.switch(K.equal(X, M), X, 0.)
return R
def _update_controller(self, inp, h_tm1, M):
"""We have to update the inner RNN inside the NTM, this
is the function to do it. Pretty much copy+pasta from Keras
"""
x = T.concatenate([inp, M], axis=-1)
# len(h_tm1) is 1 for gru, 2 for lstm
if len(h_tm1) in [1,2]:
if hasattr(self.rnn,"get_constants"):
BW,BU = self.rnn.get_constants(x)
h_tm1 += (BW,BU)
# update state
_, h = self.rnn.step(x, h_tm1)
return h
def _circulant(leng, n_shifts):
"""
I confess, I'm actually proud of this hack. I hope you enjoy!
This will generate a tensor with `n_shifts` rotated versions of the
identity matrix. When this tensor is multiplied by a vector,
the result is `n_shifts` shifted versions of that vector. Since
everything is done with inner products, everything is differentiable.
Parameters:
----------
leng: int > 0, number of memory locations
n_shifts: int > 0, number of allowed shifts (if 1, no shift)
Returns:
--------
shift operation, a tensor with dimensions (n_shifts, leng, leng)
"""
eye = np.eye(leng)
shifts = range(n_shifts//2, -n_shifts//2, -1)
C = np.asarray([np.roll(eye, s, axis=1) for s in shifts])
return theano.shared(C.astype(theano.config.floatX))
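# Illustrative note: _circulant(3, 3) stacks np.roll(np.eye(3), s, axis=1) for
# s in (1, 0, -1), giving a (3, 3, 3) tensor; an inner product of this tensor
# with a weight vector yields all three shifted copies of that vector at once,
# which keeps the shift operation differentiable.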
def _renorm(x):
return x / (x.sum(axis=1, keepdims=True))
def _softmax(x):
wt = x.flatten(ndim=2)
w = T.nnet.softmax(wt)
return w.reshape(x.shape) # T.clip(s, 0, 1)
def _cosine_distance(M, k):
dot = (M * k[:, None, :]).sum(axis=-1)
nM = T.sqrt((M**2).sum(axis=-1))
nk = T.sqrt((k**2).sum(axis=-1, keepdims=True))
return dot / (nM * nk)
class NeuralTuringMachine(Recurrent):
""" Neural Turing Machines
Non-obvious parameters:
-----------------------
shift_range: int, number of available shifts, e.g. if 3, available shifts are
(-1, 0, 1)
n_slots: number of memory locations
m_length: memory length at each location
Known issues:
-------------
Theano may complain when n_slots == 1.
"""
def __init__(self, output_dim, n_slots, m_length, shift_range=3,
inner_rnn='gru',
init='glorot_uniform', inner_init='orthogonal',
input_dim=None, input_length=None, **kwargs):
self.output_dim = output_dim
self.n_slots = n_slots
self.m_length = m_length
self.shift_range = shift_range
self.init = init
self.inner_init = inner_init
self.inner_rnn = inner_rnn
self.input_dim = input_dim
self.input_length = input_length
if self.input_dim:
kwargs['input_shape'] = (self.input_length, self.input_dim)
super(NeuralTuringMachine, self).__init__(**kwargs)
def build(self):
input_leng, input_dim = self.input_shape[1:]
self.input = T.tensor3()
if self.inner_rnn == 'gru':
self.rnn = GRU(
activation='relu',
input_dim=input_dim+self.m_length,
input_length=input_leng,
output_dim=self.output_dim, init=self.init,
inner_init=self.inner_init)
elif self.inner_rnn == 'lstm':
self.rnn = LSTM(
input_dim=input_dim+self.m_length,
input_length=input_leng,
output_dim=self.output_dim, init=self.init,
forget_bias_init='zero',
inner_init=self.inner_init)
else:
raise ValueError('this inner_rnn is not implemented yet.')
self.rnn.build()
# initial memory, state, read and write vectors
self.M = theano.shared((.001*np.ones((1,)).astype(floatX)))
self.init_h = K.zeros((self.output_dim))
self.init_wr = self.rnn.init((self.n_slots,))
self.init_ww = self.rnn.init((self.n_slots,))
# write
self.W_e = self.rnn.init((self.output_dim, self.m_length)) # erase
self.b_e = K.zeros((self.m_length))
self.W_a = self.rnn.init((self.output_dim, self.m_length)) # add
self.b_a = K.zeros((self.m_length))
# get_w parameters for reading operation
self.W_k_read = self.rnn.init((self.output_dim, self.m_length))
self.b_k_read = self.rnn.init((self.m_length, ))
self.W_c_read = self.rnn.init((self.output_dim, 3)) # 3 = beta, g, gamma see eq. 5, 7, 9
self.b_c_read = K.zeros((3))
self.W_s_read = self.rnn.init((self.output_dim, self.shift_range))
self.b_s_read = K.zeros((self.shift_range)) # b_s lol! not intentional
# get_w parameters for writing operation
self.W_k_write = self.rnn.init((self.output_dim, self.m_length))
self.b_k_write = self.rnn.init((self.m_length, ))
self.W_c_write = self.rnn.init((self.output_dim, 3)) # 3 = beta, g, gamma see eq. 5, 7, 9
self.b_c_write = K.zeros((3))
self.W_s_write = self.rnn.init((self.output_dim, self.shift_range))
self.b_s_write = K.zeros((self.shift_range))
self.C = _circulant(self.n_slots, self.shift_range)
self.trainable_weights = self.rnn.trainable_weights + [
self.W_e, self.b_e,
self.W_a, self.b_a,
self.W_k_read, self.b_k_read,
self.W_c_read, self.b_c_read,
self.W_s_read, self.b_s_read,
self.W_k_write, self.b_k_write,
self.W_s_write, self.b_s_write,
self.W_c_write, self.b_c_write,
self.M,
self.init_h, self.init_wr, self.init_ww]
if self.inner_rnn == 'lstm':
self.init_c = K.zeros((self.output_dim))
self.trainable_weights = self.trainable_weights + [self.init_c, ]
def _read(self, w, M):
return (w[:, :, None]*M).sum(axis=1)
def _write(self, w, e, a, M):
Mtilda = M * (1 - w[:, :, None]*e[:, None, :])
Mout = Mtilda + w[:, :, None]*a[:, None, :]
return Mout
def _get_content_w(self, beta, k, M):
num = beta[:, None] * _cosine_distance(M, k)
return _softmax(num)
def _get_location_w(self, g, s, C, gamma, wc, w_tm1):
wg = g[:, None] * wc + (1-g[:, None])*w_tm1
Cs = (C[None, :, :, :] * wg[:, None, None, :]).sum(axis=3)
wtilda = (Cs * s[:, :, None]).sum(axis=1)
wout = _renorm(wtilda ** gamma[:, None])
return wout
def _get_controller_output(self, h, W_k, b_k, W_c, b_c, W_s, b_s):
k = T.tanh(T.dot(h, W_k) + b_k) # + 1e-6
c = T.dot(h, W_c) + b_c
beta = T.nnet.relu(c[:, 0]) + 1e-4
g = T.nnet.sigmoid(c[:, 1])
gamma = T.nnet.relu(c[:, 2]) + 1.0001
s = T.nnet.softmax(T.dot(h, W_s) + b_s)
return k, beta, g, gamma, s
def get_initial_states(self, X):
batch_size = X.shape[0]
init_M = self.M.dimshuffle(0, 'x', 'x').repeat(
batch_size, axis=0).repeat(self.n_slots, axis=1).repeat(
self.m_length, axis=2)
init_M = init_M.flatten(ndim=2)
init_h = self.init_h.dimshuffle(('x', 0)).repeat(batch_size, axis=0)
init_wr = self.init_wr.dimshuffle(('x', 0)).repeat(batch_size, axis=0)
init_ww = self.init_ww.dimshuffle(('x', 0)).repeat(batch_size, axis=0)
if self.inner_rnn == 'lstm':
init_c = self.init_c.dimshuffle(('x', 0)).repeat(batch_size, axis=0)
return [init_M, T.nnet.softmax(init_wr), T.nnet.softmax(init_ww),
init_h, init_c]
else:
return [init_M, T.nnet.softmax(init_wr), T.nnet.softmax(init_ww),
init_h]
@property
def output_shape(self):
input_shape = self.input_shape
if self.return_sequences:
return input_shape[0], input_shape[1], self.output_dim
else:
return input_shape[0], self.output_dim
def get_full_output(self, train=False):
"""
This method is for research and visualization purposes. Use it as
X = model.get_input() # full model
Y = ntm.get_output() # this layer
F = theano.function([X], Y, allow_input_downcast=True)
[memory, read_address, write_address, rnn_state] = F(x)
if inner_rnn == "lstm" use it as
[memory, read_address, write_address, rnn_cell, rnn_state] = F(x)
"""
# input shape: (nb_samples, time (padded with zeros), input_dim)
X = self.get_input(train)
assert K.ndim(X) == 3
if K._BACKEND == 'tensorflow':
if not self.input_shape[1]:
raise Exception('When using TensorFlow, you should define ' +
                                'explicitly the number of timesteps of ' +
'your sequences. Make sure the first layer ' +
'has a "batch_input_shape" argument ' +
'including the samples axis.')
mask = self.get_output_mask(train)
if mask:
# apply mask
X *= K.cast(K.expand_dims(mask), X.dtype)
masking = True
else:
masking = False
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(X)
states = rnn_states(self.step, X, initial_states,
go_backwards=self.go_backwards,
masking=masking)
return states
def step(self, x, states):
M_tm1, wr_tm1, ww_tm1 = states[:3]
# reshape
M_tm1 = M_tm1.reshape((x.shape[0], self.n_slots, self.m_length))
# read
h_tm1 = states[3:]
k_read, beta_read, g_read, gamma_read, s_read = self._get_controller_output(
h_tm1[0], self.W_k_read, self.b_k_read, self.W_c_read, self.b_c_read,
self.W_s_read, self.b_s_read)
wc_read = self._get_content_w(beta_read, k_read, M_tm1)
wr_t = self._get_location_w(g_read, s_read, self.C, gamma_read,
wc_read, wr_tm1)
M_read = self._read(wr_t, M_tm1)
# update controller
h_t = _update_controller(self, x, h_tm1, M_read)
# write
k_write, beta_write, g_write, gamma_write, s_write = self._get_controller_output(
h_t[0], self.W_k_write, self.b_k_write, self.W_c_write,
self.b_c_write, self.W_s_write, self.b_s_write)
wc_write = self._get_content_w(beta_write, k_write, M_tm1)
ww_t = self._get_location_w(g_write, s_write, self.C, gamma_write,
wc_write, ww_tm1)
e = T.nnet.sigmoid(T.dot(h_t[0], self.W_e) + self.b_e)
a = T.tanh(T.dot(h_t[0], self.W_a) + self.b_a)
M_t = self._write(ww_t, e, a, M_tm1)
M_t = M_t.flatten(ndim=2)
return h_t[0], [M_t, wr_t, ww_t] + h_t
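# Hedged usage sketch (assumes the old Theano-backend Keras API this layer was
# written against; the dimensions, loss and optimizer below are illustrative,
# not taken from the source):
#
#   from keras.models import Sequential
#   model = Sequential()
#   model.add(NeuralTuringMachine(output_dim=128, n_slots=50, m_length=20,
#                                 shift_range=3, inner_rnn='lstm',
#                                 input_dim=8, input_length=None,
#                                 return_sequences=True))
#   model.compile(loss='binary_crossentropy', optimizer='adam')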
|
|
# Subset of test.support from CPython 3.5, just what we need to run asyncio
# test suite. The code is copied from CPython 3.5 to not depend on the test
# module because it is rarely installed.
# Ignore symbol TEST_HOME_DIR: test_events works without it
import functools
import gc
import os
import platform
import re
import socket
import subprocess
import sys
import time
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
return stderr
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
if '__isolated' in env_vars:
isolated = env_vars.pop('__isolated')
else:
isolated = not env_vars
cmd_line = [sys.executable, '-X', 'faulthandler']
if isolated and sys.version_info >= (3, 4):
# isolated mode: ignore Python environment variables, ignore user
# site-packages, and don't add the current directory to sys.path
cmd_line.append('-I')
elif not env_vars:
# ignore Python environment variables
cmd_line.append('-E')
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
# But a special flag that can be set to override -- in this case, the
# caller is responsible to pass the full environment.
if env_vars.pop('__cleanenv', None):
env = {}
env.update(env_vars)
cmd_line.extend(args)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
stderr) tuple.
    If the __cleanenv keyword is set, env_vars is used as a fresh environment.
Python is started in isolated mode (command line option -I),
except if the __isolated keyword is set to False.
"""
return _assert_python(True, *args, **env_vars)
is_jython = sys.platform.startswith('java')
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
HOST = "127.0.0.1"
HOSTv6 = "::1"
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR "
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
if reuse == 1:
raise TestFailed("tests should never set the SO_REUSEPORT "
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
version if less than min_version.
For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
is lesser than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is
less than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version
is less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
# Use test.support if available
try:
from test.support import *
except ImportError:
pass
# Use test.script_helper if available
try:
from test.script_helper import assert_python_ok
except ImportError:
pass
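# Hedged self-check of the helpers above (runs only when this module is
# executed directly; the one-liner script is illustrative):
if __name__ == '__main__':
    rc, out, err = assert_python_ok('-c', 'print("hello")')
    assert rc == 0 and out.strip() == b'hello'
    port = find_unused_port()
    assert 0 < port < 65536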
|
|
import time
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase, Client
from django.test.utils import override_settings
from wagtail.wagtailcore.models import Site as WagtailSite
from gem.forms import GemRegistrationForm, GemEditProfileForm
from gem.models import GemSettings, GemCommentReport
from molo.commenting.forms import MoloCommentForm
from molo.commenting.models import MoloComment
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import SiteLanguageRelation, Main, Languages
class GemRegistrationViewTest(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.client = Client()
self.mk_main2()
def test_register_view(self):
response = self.client.get(reverse('user_register'))
self.assertTrue(isinstance(response.context['form'],
GemRegistrationForm))
def test_register_view_invalid_form(self):
# NOTE: empty form submission
response = self.client.post(reverse('user_register'), {
})
self.assertFormError(
response, 'form', 'username', ['This field is required.'])
self.assertFormError(
response, 'form', 'password', ['This field is required.'])
self.assertFormError(
response, 'form', 'gender', ['This field is required.'])
self.assertFormError(
response, 'form', 'security_question_1_answer',
['This field is required.']
)
self.assertFormError(
response, 'form', 'security_question_2_answer',
['This field is required.']
)
def test_email_or_phone_not_allowed_in_username(self):
response = self.client.post(reverse('user_register'), {
'username': '[email protected]',
'password': '1234',
'gender': 'm',
'security_question_1_answer': 'cat',
'security_question_2_answer': 'dog'
})
expected_validation_message = "Sorry, but that is an invalid" \
" username. Please don't use your" \
" email address or phone number in" \
" your username."
self.assertContains(response, expected_validation_message)
response = self.client.post(reverse('user_register'), {
'username': '0821231234',
'password': '1234',
'gender': 'm',
'security_question_1_answer': 'cat',
'security_question_2_answer': 'dog'
})
self.assertContains(response, expected_validation_message)
def test_successful_login_for_migrated_users(self):
user = User.objects.create_user(
username='1_newuser',
email='[email protected]',
password='newuser')
user.gem_profile.migrated_username = 'newuser'
user.gem_profile.save()
response = self.client.post('/profiles/login/?next=/', {
'username': 'newuser',
'password': 'newuser',
})
self.assertRedirects(response, '/')
client = Client(HTTP_HOST=self.site2.hostname)
response = client.post('/profiles/login/?next=/', {
'username': 'newuser',
'password': 'newuser',
})
self.assertContains(
response,
'Your username and password do not match. Please try again.')
def test_successful_login_for_migrated_users_in_site_2(self):
user = User.objects.create_user(
username='2_newuser',
email='[email protected]',
password='newuser2')
user.gem_profile.migrated_username = 'newuser'
user.gem_profile.save()
user.profile.site = self.site2
user.profile.save()
user3 = User.objects.create_user(
username='1_newuser',
email='[email protected]',
password='newuser1')
user3.gem_profile.migrated_username = 'newuser'
user3.gem_profile.save()
user3.profile.site = self.site
user3.profile.save()
response = self.client.post('/profiles/login/?next=/', {
'username': 'newuser',
'password': 'newuser2',
})
self.assertContains(
response,
'Your username and password do not match. Please try again.')
response = self.client.post('/profiles/login/?next=/', {
'username': 'newuser',
'password': 'newuser1',
})
self.assertRedirects(response, '/')
client = Client(HTTP_HOST=self.site2.hostname)
response = client.post('/profiles/login/?next=/', {
'username': 'newuser',
'password': 'newuser2',
})
self.assertRedirects(response, '/')
response = client.post('/profiles/login/?next=/', {
'username': 'newuser',
'password': 'newuser1',
})
self.assertContains(
response,
'Your username and password do not match. Please try again.')
class GemEditProfileViewTest(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.client = Client()
self.user = User.objects.create_user(
username='tester',
email='[email protected]',
password='tester')
self.client.login(username='tester', password='tester')
def test_edit_profile_view_uses_correct_form(self):
response = self.client.get(reverse('edit_my_profile'))
self.assertTrue(isinstance(response.context['form'],
GemEditProfileForm))
def test_email_or_phone_not_allowed_in_display_name(self):
response = self.client.post(reverse('edit_my_profile'), {
'alias': '[email protected]'
})
expected_validation_message = "Sorry, but that is an invalid display" \
" name. Please don't use your" \
" email address or phone number in" \
" your display name."
self.assertContains(response, expected_validation_message)
response = self.client.post(reverse('edit_my_profile'), {
'alias': '0821231234'
})
self.assertContains(response, expected_validation_message)
def test_offensive_language_not_allowed_in_display_name(self):
site = Site.objects.get(id=1)
site.name = 'GEM'
site.save()
GemSettings.objects.create(
site_id=site.id,
banned_names_with_offensive_language='naughty')
response = self.client.post(reverse('edit_my_profile'), {
'alias': 'naughty'
})
expected_validation_message = "Sorry, the name you have used is not " \
"allowed. Please, use a different name "\
"for your display name."
self.assertContains(response, expected_validation_message)
class GemResetPasswordTest(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.client = Client()
self.user = User.objects.create_user(
username='tester',
email='[email protected]',
password='tester')
self.user.gem_profile.set_security_question_1_answer('dog')
self.user.gem_profile.set_security_question_2_answer('cat')
self.user.gem_profile.save()
# to get the session set up
response = self.client.get(reverse('forgot_password'))
self.question_being_asked = settings.SECURITY_QUESTION_1 if \
settings.SECURITY_QUESTION_1 in response.content else \
settings.SECURITY_QUESTION_2
def post_invalid_username_to_forgot_password_view(self):
return self.client.post(reverse('forgot_password'), {
'username': 'invalid',
'random_security_question_answer': 'something'
})
def test_forgot_password_view_invalid_username(self):
response = self.post_invalid_username_to_forgot_password_view()
self.assertContains(response, 'The username that you entered appears '
'to be invalid. Please try again.')
def test_forgot_password_view_inactive_user(self):
self.user.is_active = False
self.user.save()
response = self.client.post(reverse('forgot_password'), {
'username': self.user.username,
'random_security_question_answer': 'something'
})
self.assertContains(response, 'This account is inactive.')
def post_invalid_answer_to_forgot_password_view(self):
return self.client.post(reverse('forgot_password'), {
'username': self.user.username,
'random_security_question_answer': 'invalid'
})
def test_forgot_password_view_invalid_answer(self):
response = self.post_invalid_answer_to_forgot_password_view()
self.assertContains(response, 'Your answer to the security question '
'was invalid. Please try again.')
def test_unsuccessful_username_attempts(self):
response = None
for x in range(6):
response = self.post_invalid_username_to_forgot_password_view()
# on the 6th attempt
self.assertContains(response, 'Too many attempts. Please try again '
'later.')
def test_unsuccessful_answer_attempts(self):
response = None
for x in range(6):
response = self.post_invalid_answer_to_forgot_password_view()
# on the 6th attempt
self.assertContains(response, 'Too many attempts. Please try again '
'later.')
def get_expected_token_and_redirect_url(self):
expected_token = default_token_generator.make_token(self.user)
expected_query_params = QueryDict(mutable=True)
expected_query_params['user'] = self.user.username
expected_query_params['token'] = expected_token
expected_redirect_url = '{0}?{1}'.format(
reverse('reset_password'), expected_query_params.urlencode()
)
return expected_token, expected_redirect_url
def proceed_to_reset_password_page(self):
if self.question_being_asked == settings.SECURITY_QUESTION_1:
answer = 'dog'
else:
answer = 'cat'
response = self.client.post(reverse('forgot_password'), {
'username': self.user.username,
'random_security_question_answer': answer
})
expected_token, expected_redirect_url = \
self.get_expected_token_and_redirect_url()
self.assertRedirects(response, expected_redirect_url)
return expected_token, expected_redirect_url
def test_reset_password_view_pin_mismatch(self):
expected_token, expected_redirect_url = \
self.proceed_to_reset_password_page()
response = self.client.post(expected_redirect_url, {
'username': self.user.username,
'token': expected_token,
'password': '1234',
'confirm_password': '4321'
})
self.assertContains(response, 'The two PINs that you entered do not '
'match. Please try again.')
def test_reset_password_view_requires_query_params(self):
response = self.client.get(reverse('reset_password'))
self.assertEqual(403, response.status_code)
def test_reset_password_view_invalid_username(self):
expected_token, expected_redirect_url = \
self.proceed_to_reset_password_page()
response = self.client.post(expected_redirect_url, {
'username': 'invalid',
'token': expected_token,
'password': '1234',
'confirm_password': '1234'
})
self.assertEqual(403, response.status_code)
def test_reset_password_view_inactive_user(self):
expected_token, expected_redirect_url = \
self.proceed_to_reset_password_page()
self.user.is_active = False
self.user.save()
response = self.client.post(expected_redirect_url, {
'username': self.user.username,
'token': expected_token,
'password': '1234',
'confirm_password': '1234'
})
self.assertEqual(403, response.status_code)
def test_reset_password_view_invalid_token(self):
expected_token, expected_redirect_url = \
self.proceed_to_reset_password_page()
response = self.client.post(expected_redirect_url, {
'username': self.user.username,
'token': 'invalid',
'password': '1234',
'confirm_password': '1234'
})
self.assertEqual(403, response.status_code)
def test_happy_path(self):
expected_token, expected_redirect_url = \
self.proceed_to_reset_password_page()
response = self.client.post(expected_redirect_url, {
'username': self.user.username,
'token': expected_token,
'password': '1234',
'confirm_password': '1234'
})
self.assertRedirects(response, reverse('reset_password_success'))
self.assertTrue(
self.client.login(username='tester', password='1234')
)
@override_settings(SESSION_COOKIE_AGE=1)
def test_session_expiration_allows_subsequent_attempts(self):
self.test_unsuccessful_username_attempts()
time.sleep(1)
response = self.client.post(reverse('forgot_password'), {
'username': 'invalid',
'random_security_question_answer': 'something'
})
# the view should redirect back to itself to set up a new session
self.assertRedirects(response, reverse('forgot_password'))
# follow the redirect
self.client.get(reverse('forgot_password'))
# now another attempt should be possible
self.test_forgot_password_view_invalid_username()
class CommentingTestCase(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
        self.client = Client()
self.main = Main.objects.all().first()
self.language_setting = Languages.objects.create(
site_id=self.main.get_site().pk)
self.english = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='en',
is_active=True)
self.user = User.objects.create_user(
username='tester',
email='[email protected]',
password='tester')
self.superuser = User.objects.create_superuser(
username='admin',
email='[email protected]',
password='admin')
self.client = Client()
self.yourmind = self.mk_section(
self.section_index, title='Your mind')
self.article = self.mk_article(self.yourmind,
title='article 1',
subtitle='article 1 subtitle',
slug='article-1')
def create_comment(self, article, comment, user, parent=None):
return MoloComment.objects.create(
content_type=ContentType.objects.get_for_model(article),
object_pk=article.pk,
content_object=article,
site=Site.objects.get_current(),
user=user,
comment=comment,
parent=parent,
submit_date=datetime.now())
def getData(self):
return {
'name': self.user.username,
'email': self.user.email
}
def test_comment_shows_user_display_name(self):
# check when user doesn't have an alias
self.create_comment(self.article, 'test comment1 text', self.user)
response = self.client.get('/sections-main-1/your-mind/article-1/')
self.assertContains(response, "Anonymous")
# check when user have an alias
self.user.profile.alias = 'this is my alias'
self.user.profile.save()
self.create_comment(self.article, 'test comment2 text', self.user)
response = self.client.get('/sections-main-1/your-mind/article-1/')
self.assertContains(response, "this is my alias")
self.assertNotContains(response, "tester")
def test_comment_distinguishes_moderator_user(self):
self.user = User.objects.create_user(
username='foo',
email='[email protected]',
password='foo',
is_staff=True)
self.client.login(username='admin', password='admin')
response = self.client.get('/sections-main-1/your-mind/article-1/')
self.assertNotContains(response, "Big Sister")
self.assertNotContains(response, "Gabi")
self.create_comment(self.article, 'test comment1 text', self.superuser)
response = self.client.get('/sections-main-1/your-mind/article-1/')
self.assertContains(response, "Big Sister")
self.assertNotContains(response, "Gabi")
default_site = WagtailSite.objects.get(is_default_site=True)
setting = GemSettings.objects.get(site=default_site)
setting.moderator_name = 'Gabi'
setting.save()
response = self.client.get('/sections-main-1/your-mind/article-1/')
self.assertNotContains(response, "Big Sister")
self.assertContains(response, "Gabi")
def getValidData(self, obj):
form = MoloCommentForm(obj)
form_data = self.getData()
form_data.update(form.initial)
return form_data
def test_comment_filters(self):
site = Site.objects.get(id=1)
site.name = 'GEM'
site.save()
GemSettings.objects.create(site_id=site.id,
banned_keywords_and_patterns='naughty')
form_data = self.getValidData(self.article)
# check if user has typed in a number
comment_form = MoloCommentForm(
self.article, data=dict(form_data, comment="0821111111")
)
self.assertFalse(comment_form.is_valid())
# check if user has typed in an email address
comment_form = MoloCommentForm(
self.article, data=dict(form_data, comment="[email protected]")
)
self.assertFalse(comment_form.is_valid())
# check if user has used a banned keyword
comment_form = MoloCommentForm(
self.article, data=dict(form_data, comment="naughty")
)
self.assertFalse(comment_form.is_valid())
class GemFeedViewsTest(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.client = Client()
section = self.mk_section(self.section_index, title='Test Section')
self.article_page = self.mk_article(
section, title='Test Article',
subtitle='This should appear in the feed')
def test_rss_feed_view(self):
response = self.client.get(reverse('feed_rss'))
self.assertContains(response, self.article_page.title)
self.assertContains(response, self.article_page.subtitle)
self.assertNotContains(response, 'example.com')
def test_atom_feed_view(self):
response = self.client.get(reverse('feed_atom'))
self.assertContains(response, self.article_page.title)
self.assertContains(response, self.article_page.subtitle)
self.assertNotContains(response, 'example.com')
class GemReportCommentViewTest(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.user = User.objects.create_user(
username='tester',
email='[email protected]',
password='tester')
self.client.login(username='tester', password='tester')
self.content_type = ContentType.objects.get_for_model(self.user)
self.yourmind = self.mk_section(
self.section_index, title='Your mind')
self.article = self.mk_article(self.yourmind,
title='article 1',
subtitle='article 1 subtitle',
slug='article-1')
def create_comment(self, article, comment, parent=None):
return MoloComment.objects.create(
content_type=ContentType.objects.get_for_model(article),
object_pk=article.pk,
content_object=article,
site=Site.objects.get_current(),
user=self.user,
comment=comment,
parent=parent,
submit_date=datetime.now())
def create_reported_comment(self, comment, report_reason):
return GemCommentReport.objects.create(
comment=comment,
user=self.user,
reported_reason=report_reason
)
def test_report_view(self):
comment = self.create_comment(self.article, 'report me')
response = self.client.get(
reverse('report_comment', args=(comment.pk,))
)
self.assertContains(response, 'Please let us know why you are '
'reporting this comment?')
def test_user_has_already_reported_comment(self):
comment = self.create_comment(self.article, 'report me')
self.create_reported_comment(comment, 'Spam')
response = self.client.get(
reverse('report_comment', args=(comment.pk,)), follow=True
)
self.assertContains(response, 'You have already reported this comment')
|
|
import functools
from .errors import ClaripyOperationError
from .backend_object import BackendObject
def compare_bits(f):
@functools.wraps(f)
def compare_guard(self, o):
if self.bits != o.bits:
raise TypeError("bitvectors are differently-sized (%d and %d)" % (self.bits, o.bits))
return f(self, o)
return compare_guard
def normalize_types(f):
@functools.wraps(f)
def normalize_helper(self, o):
if hasattr(o, '__module__') and o.__module__ == 'z3':
raise ValueError("this should no longer happen")
if type(o) in (int, long):
o = BVV(o, self.bits)
if type(self) in (int, long):
self = BVV(self, self.bits)
if not isinstance(self, BVV) or not isinstance(o, BVV):
return NotImplemented
return f(self, o)
return normalize_helper
class BVV(BackendObject):
__slots__ = [ 'bits', '_value', 'mod', 'value' ]
def __init__(self, value, bits):
if bits == 0 or type(bits) not in (int, long) or type(value) not in (int, long):
raise ClaripyOperationError("BVV needs a non-zero length and an int/long value")
self.bits = bits
self._value = 0
self.mod = 2**bits
self.value = value
def __hash__(self):
return hash((self.value, self.bits))
def __getstate__(self):
return (self.bits, self.value)
def __setstate__(self, s):
self.bits = s[0]
self.mod = 2**self.bits
self.value = s[1]
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v % self.mod
@property
def signed(self):
return self._value if self._value < self.mod/2 else self._value % (self.mod/2) - (self.mod/2)
@signed.setter
def signed(self, v):
self._value = v % -self.mod
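    # Hedged note: `signed` reinterprets the stored unsigned value in two's
    # complement, e.g. BVV(0xFF, 8).signed == -1 and BVV(0x80, 8).signed == -128
    # (the latter is also asserted in test() below).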
#
# Arithmetic stuff
#
@normalize_types
@compare_bits
def __add__(self, o):
return BVV(self.value + o.value, self.bits)
@normalize_types
@compare_bits
def __sub__(self, o):
return BVV(self.value - o.value, self.bits)
@normalize_types
@compare_bits
def __mul__(self, o):
return BVV(self.value * o.value, self.bits)
@normalize_types
@compare_bits
def __mod__(self, o):
return BVV(self.value % o.value, self.bits)
@normalize_types
@compare_bits
def __div__(self, o):
return BVV(self.value / o.value, self.bits)
#
# Reverse arithmetic stuff
#
@normalize_types
@compare_bits
def __radd__(self, o):
return BVV(self.value + o.value, self.bits)
@normalize_types
@compare_bits
def __rsub__(self, o):
return BVV(o.value - self.value, self.bits)
@normalize_types
@compare_bits
def __rmul__(self, o):
return BVV(self.value * o.value, self.bits)
@normalize_types
@compare_bits
def __rmod__(self, o):
return BVV(o.value % self.value, self.bits)
@normalize_types
@compare_bits
def __rdiv__(self, o):
return BVV(o.value / self.value, self.bits)
#
# Bit operations
#
@normalize_types
@compare_bits
def __and__(self, o):
return BVV(self.value & o.value, self.bits)
@normalize_types
@compare_bits
def __or__(self, o):
return BVV(self.value | o.value, self.bits)
@normalize_types
@compare_bits
def __xor__(self, o):
return BVV(self.value ^ o.value, self.bits)
@normalize_types
@compare_bits
def __lshift__(self, o):
return BVV(self.value << o.signed, self.bits)
@normalize_types
@compare_bits
def __rshift__(self, o):
return BVV(self.signed >> o.signed, self.bits)
def __invert__(self):
return BVV(self.value ^ self.mod-1, self.bits)
#
# Reverse bit operations
#
@normalize_types
@compare_bits
def __rand__(self, o):
return BVV(self.value & o.value, self.bits)
@normalize_types
@compare_bits
def __ror__(self, o):
return BVV(self.value | o.value, self.bits)
@normalize_types
@compare_bits
def __rxor__(self, o):
return BVV(self.value ^ o.value, self.bits)
@normalize_types
@compare_bits
def __rlshift__(self, o):
return BVV(o.value << self.signed, self.bits)
@normalize_types
@compare_bits
def __rrshift__(self, o):
return BVV(o.signed >> self.signed, self.bits)
#
# Boolean stuff
#
@normalize_types
@compare_bits
def __eq__(self, o):
return self.value == o.value
@normalize_types
@compare_bits
def __ne__(self, o):
return self.value != o.value
@normalize_types
@compare_bits
def __lt__(self, o):
return self.value < o.value
@normalize_types
@compare_bits
def __gt__(self, o):
return self.value > o.value
@normalize_types
@compare_bits
def __le__(self, o):
return self.value <= o.value
@normalize_types
@compare_bits
def __ge__(self, o):
return self.value >= o.value
#
# Conversions
#
def size(self):
return self.bits
def __repr__(self):
return 'BVV(0x%x, %d)' % (self.value, self.bits)
#
# External stuff
#
def BitVecVal(value, bits):
return BVV(value, bits)
def ZeroExt(num, o):
return BVV(o.value, o.bits + num)
def SignExt(num, o):
return BVV(o.signed, o.bits + num)
def Extract(f, t, o):
return BVV((o.value >> t) & (2**(f+1) - 1), f-t+1)
def Concat(*args):
total_bits = 0
total_value = 0
for o in args:
total_value = (total_value << o.bits) | o.value
total_bits += o.bits
return BVV(total_value, total_bits)
def RotateRight(self, bits):
return LShR(self, bits) | (self << (self.size()-bits))
def RotateLeft(self, bits):
return (self << bits) | (LShR(self, (self.size()-bits)))
def Reverse(a):
if a.size() == 8:
return a
elif a.size() % 8 != 0:
raise ClaripyOperationError("can't reverse non-byte sized bitvectors")
else:
return Concat(*[Extract(i+7, i, a) for i in range(0, a.size(), 8)])
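# Hedged illustration: Reverse swaps byte order, so 0x1234 on 16 bits becomes
# 0x3412, while single-byte values are returned unchanged.
def _example_reverse():
    assert Reverse(BVV(0x1234, 16)).value == 0x3412
    assert Reverse(BVV(0xAB, 8)).value == 0xAB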
@normalize_types
@compare_bits
def ULT(self, o):
return self.value < o.value
@normalize_types
@compare_bits
def UGT(self, o):
return self.value > o.value
@normalize_types
@compare_bits
def ULE(self, o):
return self.value <= o.value
@normalize_types
@compare_bits
def UGE(self, o):
return self.value >= o.value
@normalize_types
@compare_bits
def SLT(self, o):
return self.signed < o.signed
@normalize_types
@compare_bits
def SGT(self, o):
return self.signed > o.signed
@normalize_types
@compare_bits
def SLE(self, o):
return self.signed <= o.signed
@normalize_types
@compare_bits
def SGE(self, o):
return self.signed >= o.signed
#
# Pure boolean stuff
#
def BoolVal(b):
return b
def And(*args):
return all(args)
def Or(*args):
return any(args)
def Not(b):
return not b
@normalize_types
def normalizer(*args):
return args
def If(c, t, f):
t,f = normalizer(t,f) #pylint:disable=unbalanced-tuple-unpacking
if c: return t
else: return f
@normalize_types
@compare_bits
def LShR(a, b):
return BVV(a.value >> b.signed, a.bits)
def test():
a = BVV(1, 8)
b = BVV(2, 8)
assert a | b == 3
assert a & b == 0
assert a / b == 0
assert b * b == 4
assert a.signed == a.value
assert a + 8 == 9
c = BVV(128, 8)
assert c.signed == -128
d = BVV(255, 8)
assert Extract(1, 0, d) == 3
assert SignExt(8, d).value == 2**16-1
assert ZeroExt(8, d).size() == 16
assert ZeroExt(8, d).value == 255
e = BVV(0b1010, 4)
f = BVV(0b11, 2)
assert Concat(e, e, e, e) == 0b1010101010101010
assert Concat(e,f,f) == 0b10101111
if __name__ == '__main__':
test()
|
|
"""
Copyright 2015 Austin Ankney, Ming Fang, Wenjun Wang and Yao Zhou
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file defines the concrete control flow logic
This script extracts features from the query.
=============================================
Usage:
This file cannot run standalone, the functions will be used in other scripts,
such as "train.py" and "classify.py"
TODO([email protected]):
- Consider encoding issue
Author: Wenjun Wang
Date: June 28, 2015
"""
import nltk
import hashlib
import numpy as np
def stopword(stpfile):
"""Reads stopwords from a file and return a set of stopwords
"""
stopwords = set()
for line in open(stpfile):
stopwords.add(line.strip())
return stopwords
def parse_options(options=''):
"""parse feature options, i.e. which types of features need to extract
Arg:
options: a string of feature options in the format like: '-uni -pos2'
Return:
feature_arg: a dictionary of feature options, key: feature name, value: True/False
"""
argv = options.split()
feature_arg = {}
feature_arg['unigram'] = False
feature_arg['POS'] = False
feature_arg['POSbigram'] = False
feature_arg['stem'] = False
feature_arg['stopword_removal'] = False
for i in xrange(0,len(argv)):
if argv[i].lower()[:4] == '-uni':
feature_arg['unigram'] = True
if argv[i].lower()[:6] == '-pos2':
feature_arg['POSbigram'] = True
feature_arg['POS'] = True
if argv[i].lower()[:6] == '-stprm':
feature_arg['stopword_removal'] = True
if argv[i].lower() == '-stem':
feature_arg['stem'] = True
return feature_arg
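# Hedged illustration: for the option string '-uni -pos2', only the unigram,
# POS and POSbigram feature types end up switched on.
def _example_parse_options():
    args = parse_options('-uni -pos2')
    assert args['unigram'] and args['POS'] and args['POSbigram']
    assert not args['stem'] and not args['stopword_removal']
    return args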
def feature_generator(query, stopwords, feature_arg):
"""Generate a feature set from the query
Args:
        query: the query to extract features from
stopwords: a set of stopwords
feature_arg: returned by parse_options,
                     contains info on which types of features need to be extracted
Return:
features: a set of features
"""
features = set()
token_list = nltk.word_tokenize(query.lower())
if feature_arg['POS'] == True:
token_list = nltk.pos_tag(token_list)
if feature_arg['stopword_removal'] == True:
token_list = _stopword_removal(token_list, stopwords)
if feature_arg['stem'] == True:
token_list = _stemming(token_list)
if feature_arg['unigram'] == True:
_ngram(1, token_list, features)
if feature_arg['POSbigram'] == True:
_POSngram(2, token_list, features)
return features
def _ngram(n, token_list, features):
"""Extract ngram features
Currently, only implements unigram
This function is called by feature_generator
Args:
n: n=1 unigram, n=2 bigram, n=3 trigram
token_list: a list of tokens of a query
        features: feature set to update
"""
if n == 1:
for t in token_list:
if isinstance(t,tuple):
features |= set([t[0]])
elif isinstance(t,str):
features |= set([t])
def _POSngram(n, tag_list, features):
"""Extract POSngram features
Currently, only implements POSbigram
This function is called by feature_generator
Args:
n: n=1 POSunigram, n=2 POSbigram, n=3 POStrigram
tag_list: a list of (token, POStag) tuples of the query
        features: feature set to update
"""
features |= set(['START_'+tag_list[0][1]])
if n == 2:
for i in xrange(0,len(tag_list)-1):
features |= set([tag_list[i][1]+'_'+tag_list[i+1][1]])
features |= set([tag_list[-1][1]+'_END'])
def _stemming(token_list):
"""Stem all words in the list
Arg:
token_list: a list of tokens of a query
OR a list of (token, POStag) tuples of the query
Return:
stemmed_tokens: a list of stemmed tokens of a query
OR a list of (stemmed_token, POStag) tuples of the query
"""
porter = nltk.PorterStemmer()
if isinstance(token_list[0],str):
stemmed_tokens = [porter.stem(t) for t in token_list]
elif isinstance(token_list[0],tuple):
stemmed_tokens = [(porter.stem(t[0]),t[1]) for t in token_list]
return stemmed_tokens
def _stopword_removal(token_list, stopwords):
"""Remove all stopwords in a sentence
Arg:
token_list: a list of tokens of a query
OR a list of (token, POStag) tuples of the query
Return:
clean_tokens: stopwords-removed version of original token_list
"""
clean_tokens = []
while len(token_list) > 0:
if isinstance(token_list[0],str):
target = token_list[0].lower()
elif isinstance(token_list[0],tuple):
target = token_list[0][0].lower()
if target in stopwords:
token_list.pop(0)
else:
clean_tokens.append(token_list.pop(0))
return clean_tokens
def hashit(text, dictionary_size=1000):
'''
Takes a sentence, tokenizes it, stems each word, and hashes each word
based on the dictionary size specified.
'''
stemmer = nltk.SnowballStemmer("english", ignore_stopwords=True)
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(unicode(text, errors='ignore'))
x_i = [0] * dictionary_size
for token in tokens:
stemmed = stemmer.stem(token.lower())
if not stemmed in nltk.corpus.stopwords.words('english') and len(stemmed) > 1:
hasher = hashlib.sha1()
hasher.update(stemmed)
index = int(hasher.hexdigest(), 16) % dictionary_size
x_i[index] += 1
return x_i
def list2Vec(word_list):
'''
Converts a list into a numpy vector/matrix
'''
a = np.array(word_list)
return a
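# Hedged example (Python 2 style, matching the unicode() call in hashit; the
# sentence is illustrative and the NLTK stopword corpus must be installed):
def _example_hashit():
    vec = hashit('Cats are chasing a little cat', dictionary_size=100)
    return len(vec), sum(vec)  # (100, number of stemmed non-stopword tokens kept)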
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import ast
import pprint
import mxnet as mx
from mxnet.module import Module
from symdata.bbox import im_detect
from symdata.loader import load_test, generate_batch
from symdata.vis import vis_detection
from symnet.model import load_param, check_shape
def demo_net(sym, class_names, args):
# print config
print('called with args\n{}'.format(pprint.pformat(vars(args))))
# setup context
if args.gpu:
ctx = mx.gpu(int(args.gpu))
else:
ctx = mx.cpu(0)
# load single test
im_tensor, im_info, im_orig = load_test(args.image, short=args.img_short_side, max_size=args.img_long_side,
mean=args.img_pixel_means, std=args.img_pixel_stds)
# generate data batch
data_batch = generate_batch(im_tensor, im_info)
# load params
arg_params, aux_params = load_param(args.params, ctx=ctx)
    # provide data shapes for the largest possible input
data_names = ['data', 'im_info']
label_names = None
data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)), ('im_info', (1, 3))]
label_shapes = None
# check shapes
check_shape(sym, data_shapes, arg_params, aux_params)
# create and bind module
mod = Module(sym, data_names, label_names, context=ctx)
mod.bind(data_shapes, label_shapes, for_training=False)
mod.init_params(arg_params=arg_params, aux_params=aux_params)
# forward
mod.forward(data_batch)
rois, scores, bbox_deltas = mod.get_outputs()
rois = rois[:, 1:]
scores = scores[0]
bbox_deltas = bbox_deltas[0]
im_info = im_info[0]
# decode detection
det = im_detect(rois, scores, bbox_deltas, im_info,
bbox_stds=args.rcnn_bbox_stds, nms_thresh=args.rcnn_nms_thresh,
conf_thresh=args.rcnn_conf_thresh)
# print out
for [cls, conf, x1, y1, x2, y2] in det:
if cls > 0 and conf > args.vis_thresh:
print(class_names[int(cls)], conf, [x1, y1, x2, y2])
# if vis
if args.vis:
vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
def parse_args():
parser = argparse.ArgumentParser(description='Demonstrate a Faster R-CNN network',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--network', type=str, default='vgg16', help='base network')
parser.add_argument('--params', type=str, default='', help='path to trained model')
parser.add_argument('--dataset', type=str, default='voc', help='training dataset')
parser.add_argument('--image', type=str, default='', help='path to test image')
    parser.add_argument('--gpu', type=str, default='', help='GPU device index, e.g. "0"; leave empty to use CPU')
parser.add_argument('--vis', action='store_true', help='display results')
parser.add_argument('--vis-thresh', type=float, default=0.7, help='threshold display boxes')
# faster rcnn params
parser.add_argument('--img-short-side', type=int, default=600)
parser.add_argument('--img-long-side', type=int, default=1000)
parser.add_argument('--img-pixel-means', type=str, default='(0.0, 0.0, 0.0)')
parser.add_argument('--img-pixel-stds', type=str, default='(1.0, 1.0, 1.0)')
parser.add_argument('--rpn-feat-stride', type=int, default=16)
parser.add_argument('--rpn-anchor-scales', type=str, default='(8, 16, 32)')
parser.add_argument('--rpn-anchor-ratios', type=str, default='(0.5, 1, 2)')
parser.add_argument('--rpn-pre-nms-topk', type=int, default=6000)
parser.add_argument('--rpn-post-nms-topk', type=int, default=300)
parser.add_argument('--rpn-nms-thresh', type=float, default=0.7)
parser.add_argument('--rpn-min-size', type=int, default=16)
parser.add_argument('--rcnn-num-classes', type=int, default=21)
parser.add_argument('--rcnn-feat-stride', type=int, default=16)
parser.add_argument('--rcnn-pooled-size', type=str, default='(14, 14)')
parser.add_argument('--rcnn-batch-size', type=int, default=1)
parser.add_argument('--rcnn-bbox-stds', type=str, default='(0.1, 0.1, 0.2, 0.2)')
parser.add_argument('--rcnn-nms-thresh', type=float, default=0.3)
parser.add_argument('--rcnn-conf-thresh', type=float, default=1e-3)
args = parser.parse_args()
args.img_pixel_means = ast.literal_eval(args.img_pixel_means)
args.img_pixel_stds = ast.literal_eval(args.img_pixel_stds)
args.rpn_anchor_scales = ast.literal_eval(args.rpn_anchor_scales)
args.rpn_anchor_ratios = ast.literal_eval(args.rpn_anchor_ratios)
args.rcnn_pooled_size = ast.literal_eval(args.rcnn_pooled_size)
args.rcnn_bbox_stds = ast.literal_eval(args.rcnn_bbox_stds)
return args
def get_voc_names(args):
from symimdb.pascal_voc import PascalVOC
args.rcnn_num_classes = len(PascalVOC.classes)
return PascalVOC.classes
def get_coco_names(args):
from symimdb.coco import coco
args.rcnn_num_classes = len(coco.classes)
return coco.classes
def get_vgg16_test(args):
from symnet.symbol_vgg import get_vgg_test
if not args.params:
args.params = 'model/vgg16-0010.params'
args.img_pixel_means = (123.68, 116.779, 103.939)
args.img_pixel_stds = (1.0, 1.0, 1.0)
args.net_fixed_params = ['conv1', 'conv2']
args.rpn_feat_stride = 16
args.rcnn_feat_stride = 16
args.rcnn_pooled_size = (7, 7)
return get_vgg_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
rpn_min_size=args.rpn_min_size,
num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size)
def get_resnet50_test(args):
from symnet.symbol_resnet import get_resnet_test
if not args.params:
args.params = 'model/resnet50-0010.params'
args.img_pixel_means = (0.0, 0.0, 0.0)
args.img_pixel_stds = (1.0, 1.0, 1.0)
args.rpn_feat_stride = 16
args.rcnn_feat_stride = 16
args.rcnn_pooled_size = (14, 14)
return get_resnet_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
rpn_min_size=args.rpn_min_size,
num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size,
units=(3, 4, 6, 3), filter_list=(256, 512, 1024, 2048))
def get_resnet101_test(args):
from symnet.symbol_resnet import get_resnet_test
if not args.params:
args.params = 'model/resnet101-0010.params'
args.img_pixel_means = (0.0, 0.0, 0.0)
args.img_pixel_stds = (1.0, 1.0, 1.0)
args.rpn_feat_stride = 16
args.rcnn_feat_stride = 16
args.rcnn_pooled_size = (14, 14)
return get_resnet_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
rpn_min_size=args.rpn_min_size,
num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size,
units=(3, 4, 23, 3), filter_list=(256, 512, 1024, 2048))
def get_class_names(dataset, args):
datasets = {
'voc': get_voc_names,
'coco': get_coco_names
}
if dataset not in datasets:
raise ValueError("dataset {} not supported".format(dataset))
return datasets[dataset](args)
def get_network(network, args):
networks = {
'vgg16': get_vgg16_test,
'resnet50': get_resnet50_test,
'resnet101': get_resnet101_test
}
if network not in networks:
raise ValueError("network {} not supported".format(network))
return networks[network](args)
def main():
args = parse_args()
class_names = get_class_names(args.dataset, args)
sym = get_network(args.network, args)
demo_net(sym, class_names, args)
if __name__ == '__main__':
main()
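# Hedged usage sketch (the script and file names below are hypothetical, not
# taken from the source):
#
#   python demo.py --network resnet50 --params model/resnet50-0010.params \
#       --dataset voc --image street.jpg --gpu 0 --vis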
|
|
"""
Summary:
Ief file data holder.
Contains the functionality for loading ISIS .ief files from disk.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
import os
from ship.utils import utilfunctions as uf
from ship.utils import filetools as ft
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class IefDataTypes(object):
"""Enum for the different data types within the Ief class.
Use these for easy access of the Ief class.
"""
HEADER, DETAILS, IED_DATA, SNAPSHOTS, DESCRIPTION = range(5)
class Ief(object):
"""Contains the details in the in the IEF file.
Class data and a methods for accessing and upating the .ief file.
"""
def __init__(self, path_holder, header, details, snapshots=None,
ied_data=None, description=None):
"""Constructor.
Args:
path_holder (PathHolder): Object containing the file path to this
ief file.
            header: The [Event header] section of the ief file. It contains
data like the title and main filepaths.
details: The [Event Details] section of the ief file. It
contains almost all the other data in the ief file, including
all the flags for the run.
snapshots: List containing a dictionary in each element that
has the snapshot time and filepath.
ied_data: List containing a dictionary in each element that
contains the title and file path for every ied file referenced
in the ief file.
            description: List containing the lines of the description
section of the file.
"""
self.event_header = header
self.event_details = details
self.snapshots = snapshots
self.ied_data = ied_data
self.description = description
self.path_holder = path_holder
def getFilePaths(self):
"""Returns all the file paths that occur in the ief file.
        Most paths are extracted from the header and details data, when they
exist, and are added to paths_dict. If any ied data or snapshot data
exists it will be added as a list to the dictionary.
        If a particular path is not found the value will be set to None, unless
        it's ied or snapshot data, in which case it will be an empty list.
Dict keys are: Datafile, Results, InitialConditions, 2DFile, ied,
and snapshots.
Returns:
dict - containing all of the path data stored by this object.
"""
paths_dict = {}
try:
paths_dict['Datafile'] = self._findVarInDictionary(self.event_header, 'Datafile')
except:
paths_dict['Datafile'] = None
try:
paths_dict['Results'] = self._findVarInDictionary(self.event_header, 'Results')
except:
paths_dict['Results'] = None
try:
paths_dict['InitialConditions'] = self._findVarInDictionary(self.event_details, 'InitialConditions')
except:
paths_dict['InitialConditions'] = None
try:
paths_dict['2DFile'] = self._findVarInDictionary(self.event_details, '2DFile')
except:
paths_dict['2DFile'] = None
if not self.ied_data is None and not self.ied_data == []:
ied_paths = [ied['file'] for ied in self.ied_data]
paths_dict['ieds'] = ied_paths
else:
paths_dict['ieds'] = []
if not self.snapshots is None and not self.snapshots == []:
snapshot_paths = [snap['file'] for snap in self.snapshots]
paths_dict['snapshots'] = snapshot_paths
else:
paths_dict['snapshots'] = []
return paths_dict
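    # Hedged usage sketch (assumes an Ief instance already loaded elsewhere in
    # the ship package; the variable names are illustrative):
    #
    #   paths = ief.getFilePaths()
    #   datafile = paths['Datafile']   # None if the key was not found
    #   ied_paths = paths['ieds']      # [] when no ied files are referenced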
def getValue(self, key):
"""Get a value from one of the variables dictionaries.
All single variables (i.e. not lists like ied data) are stored in two
main dictionaries. This method will return the value associated with
the given key from whichever dictionary it is stored in.
Args:
key(str): dict key for value. For a list of available keys use the
getAvailableKeys method.
Return:
string: value referenced by the given key, in the ief file.
        Note:
            Returns None if the given key is not found in either dictionary.
"""
if key in self.event_header.keys():
return self.event_header[key]
elif key in self.event_details.keys():
return self.event_details[key]
def getIedData(self):
"""Get all of the ied data stored in this object.
There can be multiple ied files referenced by an ief. This will return
a dictionary containing all of them.
If no ied files are included in the ief file the returned list will
be empty.
Returns:
dict - containing {ied_name: ied_path} for all ied files referenced.
"""
if self.ied_data == None:
return []
else:
return self.ied_data
def getSnapshots(self):
"""Get all of the snapshot data stored in this object.
        There can be multiple snapshot files referenced by an ief. This will
        return a list containing all of them.
        If no snapshots are included in the ief file the returned list will
        be empty.
        Returns:
            list - of {'time': snapshot_time, 'file': snapshot_path} dicts for
            all snapshot files referenced.
        """
        if self.snapshots is None:
            return []
        else:
            return self.snapshots
def getDescription(self):
"""Returns the description component of the ief."""
return self.description
def setValue(self, key, value):
"""Set the value of one of dictionary entries in the ief.
Args:
key(str): The key of the value to update.
value(str(: the value to update.
Raises:
KeyError: if given key is not recongised.
Warning:
Currently no checks are made on the validity of the the key given
this is because it may be a legal key, but not yet exist in the
dictionary. To fix this a list of all valid keys should be created
and checked here before setting the value. These are the keys used
in the ief file.
"""
headlist = ['Title', 'Path', 'Datafile', 'Results']
if key in headlist:
self.event_header[key] = value
else:
self.event_details[key] = value
def addIedFile(self, ied_path, name=''):
"""Add a new ied file.
Args:
ied_path(str): path to an ied file.
name=''(str): name for the ied file.
"""
if self.ied_data is None:
self.ied_data = []
self.ied_data.append({'name': name, 'file': ied_path})
def addSnapshot(self, snapshot_path, time):
"""Add a new snapshot.
Args:
snapshot_path(str): the path for the snapshot.
time(float): the time to assign to the snapshot.
"""
if self.snapshots is None:
self.snapshots = []
if not uf.isNumeric(time):
raise ValueError('time is not a numeric value')
self.snapshots.append({'time': time, 'file': snapshot_path})
def _findVarInDictionary(self, the_dict, key):
"""Returns the variable in a dictionary.
        Tests to see if a variable exists under the given key in the given
dictionary. If it does it will return it.
Args:
the_dict (Dict): Dictionary in which to check the keys existence.
key (str): Key to look for in the dictionary.
Returns:
The requested variable if it exists or False if not.
"""
try:
variable = the_dict[key]
except KeyError:
logger.debug('No ' + key + ' key found in ief')
return False
return variable
def getPrintableContents(self):
"""Return the contents of the file for printing.
Formats the contents of this Ief instance ready to be written back
to file.
Returns:
List of the formatted lines for printing to file.
TODO:
This function is a bit long and messy at the moment. Could do
with a good refactoring.
"""
contents = []
# Add the header data in a specific order
headlist = ['Title', 'Path', 'Datafile', 'Results']
contents.append('[ISIS Event Header]')
for h in headlist:
var = self._findVarInDictionary(self.event_header, h)
            if var is not False:
contents.append(h + '=' + var)
# Add the top of the event list
event_start = ['RunType', 'InitialConditions', 'Start', 'Finish',
'Timestep', 'SaveInterval']
contents.append('[ISIS Event Details]')
for s in event_start:
var = self._findVarInDictionary(self.event_details, s)
            if var is not False:
contents.append(s + '=' + var)
# Add snapshot stuff
        if self.snapshots is not None:
            for s in self.snapshots:
                contents.append('SnapshotTime=' + str(s['time']))
                contents.append('SnapshotFile=' + s['file'])
# Add ied stuff
        if self.ied_data is not None:
for d in self.ied_data:
contents.append(';' + d['name'])
contents.append('EventData=' + d['file'])
# Now throw in everything else
for key, value in self.event_details.items():
            if key not in event_start:
contents.append(key + '=' + value)
# Finally, if there's a description add it on.
        if self.description and self.description[0] != '':
            contents.append('[Description]')
            for d in self.description:
                contents.append(d)
return contents
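    # Illustrative sketch of the list returned by getPrintableContents() for a
    # minimal ief (values are hypothetical):
    #   ['[ISIS Event Header]', 'Title=Example run', 'Datafile=path/to/model.dat',
    #    'Results=path/to/results', '[ISIS Event Details]', 'RunType=Unsteady', ...]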
def write(self, filepath=None, overwrite=False):
"""Write the contents of this file to disk.
Writes out to file in the format required for reading by ISIS/FMP.
        Note:
            If a filepath is not provided and the settings in this object's
            PathHolder have not been updated, you will write over the
            file that was loaded.
        Args:
            filepath=None(str): if a filepath is provided the file will be
                written to that location. If not, the current settings in this
                object's path_holder will be used.
            overwrite=False(bool): if False and the file already exists an
                IOError will be raised.
Raises:
IOError - If unable to write to file.
"""
if filepath is None:
filepath = self.path_holder.absolutePath()
if not overwrite and os.path.exists(filepath):
raise IOError('filepath %s already exists. Set overwrite=True to ignore this warning.' % filepath)
contents = self.getPrintableContents()
ft.writeFile(contents, filepath)
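# Minimal usage sketch (illustrative only): an Ief instance is normally created
# by the library's ief file loader rather than built by hand. The loader name
# used below is hypothetical; PathHolder and the file reading/writing helpers
# are assumed from the surrounding package, not defined in this module.
#
#     ief = loadIefFromFile('path/to/run.ief')      # hypothetical loader
#     ief.setValue('Results', 'path/to/new_results')
#     ief.addIedFile('path/to/extra.ied', name='extra data')
#     ief.write(overwrite=True)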
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.forms import (CharField, DateField, FileField, Form, IntegerField,
SplitDateTimeField, ValidationError, formsets)
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.test import TestCase
class Choice(Form):
choice = CharField()
votes = IntegerField()
# FormSet allows us to use multiple instances of the same form on 1 page. For now,
# the best way to create a FormSet is by using the formset_factory function.
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
# Let's define a FormSet that takes a list of favorite drinks, but raises an
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm,
formset=BaseFavoriteDrinksFormSet, extra=3)
# Used in ``test_formset_splitdatetimefield``.
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
class FormsFormsetTestCase(TestCase):
def make_choiceformset(self, formset_data=None, formset_class=ChoiceFormSet,
total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs):
"""
Make a ChoiceFormset from the given formset_data.
The data should be given as a list of (choice, votes) tuples.
"""
kwargs.setdefault('prefix', 'choices')
kwargs.setdefault('auto_id', False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs['prefix'],) + args
return '-'.join(args)
data = {
prefixed('TOTAL_FORMS'): str(total_forms),
prefixed('INITIAL_FORMS'): str(initial_forms),
prefixed('MAX_NUM_FORMS'): str(max_num_forms),
prefixed('MIN_NUM_FORMS'): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), 'choice')] = choice
data[prefixed(str(i), 'votes')] = votes
return formset_class(data, **kwargs)
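    # Illustrative note: make_choiceformset([('Calexico', '100')]) builds a bound
    # ChoiceFormSet from data equivalent to
    #   {'choices-TOTAL_FORMS': '1', 'choices-INITIAL_FORMS': '0',
    #    'choices-MIN_NUM_FORMS': '0', 'choices-MAX_NUM_FORMS': '0',
    #    'choices-0-choice': 'Calexico', 'choices-0-votes': '100'}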
def test_basic_formset(self):
# A FormSet constructor takes the same arguments as Form. Let's create a FormSet
# for adding data. By default, it displays 1 blank form. It can display more,
# but we'll look at how to do so later.
formset = self.make_choiceformset()
self.assertHTMLEqual(str(formset), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="1000" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" /></td></tr>""")
# We treat FormSet pretty much like we would treat a normal Form. FormSet has an
# is_valid method, and a cleaned_data or errors attribute depending on whether all
# the forms passed validation. However, unlike a Form instance, cleaned_data and
# errors will be a list of dicts rather than just a single dict.
formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])
# If a FormSet was not passed any data, its is_valid and has_changed
# methods should return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_formset_validation(self):
        # FormSet instances can also have an errors attribute if validation failed for
        # any of the forms.
formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])
def test_formset_has_changed(self):
        # A FormSet instance's has_changed() method will be True if any data is
        # passed to its forms, even if the formset didn't validate.
blank_formset = self.make_choiceformset([('', '')])
self.assertFalse(blank_formset.has_changed())
# invalid formset test
invalid_formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
# valid formset test
valid_formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
# We can also prefill a FormSet with existing data by providing an ``initial``
# argument to the constructor. ``initial`` should be a list of dicts. By default,
# an extra blank form is included.
initial = [{'choice': 'Calexico', 'votes': 100}]
formset = self.make_choiceformset(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>""")
# Let's simulate what would happen if we submitted this form.
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])
def test_second_form_partially_filled(self):
# But the second form was blank! Shouldn't we get some errors? No. If we display
# a form as blank, it's ok for it to be submitted as blank. If we fill out even
# one of the fields of a blank form though, it will be validated. We may want to
        # require that at least x number of forms are completed, but we'll show how to
# handle that later.
formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])
def test_delete_prefilled_data(self):
# If we delete data that was pre-filled, we should get an error. Simply removing
# data from form fields isn't the proper way to delete it. We'll see how to
# handle that case later.
formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}])
def test_displaying_more_than_one_blank_form(self):
# Displaying more than 1 blank form ###########################################
        # We can also display more than 1 empty form at a time. To do so, pass an
        # ``extra`` argument to formset_factory.
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>""")
# Since we displayed every form as blank, we will also accept them back as blank.
# This may seem a little strange, but later we will show how to require a minimum
# number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
        # We can also display more than 1 empty form by passing a min_num argument
        # to formset_factory. It will (essentially) increment the extra argument.
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
# Min_num forms are required; extra forms can be empty.
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>""")
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
        # We can also display more than 1 empty form by passing a min_num argument to formset_factory.
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>""")
def test_single_form_completed(self):
# We can just fill out one of the forms.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])
def test_formset_validate_max_flag(self):
        # If validate_max is set and max_num is less than TOTAL_FORMS in the
        # data, the formset is marked invalid with a non-form error.
        # MAX_NUM_FORMS in the data is irrelevant here (it's output as a hint
        # for the client but its value in the returned data is not checked).
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
def test_formset_validate_min_flag(self):
        # If validate_min is set and min_num is more than TOTAL_FORMS in the
        # data, the formset is marked invalid with a non-form error.
        # MIN_NUM_FORMS in the data is irrelevant here (it's output as a hint
        # for the client but its value in the returned data is not checked).
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 3 or more forms.'])
def test_second_form_partially_filled_2(self):
# And once again, if we try to partially complete a form, validation will fail.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
# The extra argument also works when the formset is pre-filled with initial
# data.
initial = [{'choice': 'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>""")
# Make sure retrieving an empty form works, and it shows up in the form list
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(formset.empty_form.as_ul(), """<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes" /></li>""")
def test_formset_with_deletion(self):
# FormSets with deletion ######################################################
# We can easily add deletion ability to a FormSet with an argument to
# formset_factory. This will add a boolean field to each form instance. When
# that boolean field is True, the form will be in formset.deleted_forms
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>""")
# To delete something, we just need to set that form's special delete field to
# 'on'. Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'DELETE': False, 'choice': 'Calexico'}, {'votes': 900, 'DELETE': True, 'choice': 'Fergie'}, {}])
self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}])
# If we fill a form with something and then we check the can_delete checkbox for
# that form, that form's errors should not make the entire formset invalid since
# it's going to be deleted.
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If we remove the deletion flag now we will have our validation back.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
# Should be able to get deleted_forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(
form=Person,
can_delete=True)
p = PeopleForm(
{'form-0-name': '', 'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0, 'form-MAX_NUM_FORMS': 1})
self.assertTrue(p.is_valid())
self.assertEqual(len(p.deleted_forms), 1)
def test_formsets_with_ordering(self):
# FormSets with ordering ######################################################
# We can also add ordering ability to a FormSet with an argument to
        # formset_factory. This will add an integer field to each form instance. When
        # form validation succeeds, formset.ordered_forms will have the forms in the
        # order specified by the ordering fields. If a number is duplicated in the set
        # of ordering fields, for instance form 0 and form 3 are both marked as 1, then
        # the form index is used as a secondary ordering criterion. In order to put
        # something at the front of the list, you'd need to set its order to 0.
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" /></li>""")
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
])
def test_empty_ordered_fields(self):
# Ordering fields are allowed to be left blank, and if they *are* left blank,
# they will be sorted below everything else.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
])
def test_ordering_blank_fieldsets(self):
# Ordering should work with blank fieldsets.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
# FormSets with ordering + deletion ###########################################
# Let's try throwing ordering and deletion into the same form.
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': 'Calexico', 'votes': 100},
{'choice': 'Fergie', 'votes': 900},
{'choice': 'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>
<li>Order: <input type="number" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>""")
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
])
self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}])
def test_invalid_deleted_form_with_ordering(self):
# Should be able to get ordered forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)
p = PeopleForm({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(p.is_valid())
self.assertEqual(p.ordered_forms, [])
def test_clean_hook(self):
# FormSet clean hook ##########################################################
# FormSets have a hook for doing extra validation that shouldn't be tied to any
# particular form. It follows the same pattern as the clean hook on Forms.
        # We start out with some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# Make sure we didn't break the valid case.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Bloody Mary',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
# Limiting the maximum number of forms ########################################
# Base case for max_num.
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th><td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>""")
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
# Ensure that max_num has no effect when extra is less than max_num.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>""")
def test_max_num_with_initial_data(self):
# max_num with initial data
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_max_num_zero(self):
# If max_num is 0 then no form is rendered at all, regardless of extra,
# unless initial data is present. (This changed in the patch for bug
# 20084 -- previously max_num=0 trumped initial data)
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
# test that initial trumps max_num
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>""")
def test_more_initial_than_max_num(self):
# More initial forms than max_num now results in all initial forms
# being displayed (but no extra forms). This behavior was changed
# from max_num taking precedence in the patch for #20084
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th><td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke" /></td></tr>""")
# One form from initial and extra=3 with max_num=2 should result in the one
# initial form and one extra.
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_regression_6926(self):
# Regression test for #6926 ##################################################
# Make sure the management form has the correct prefix.
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_regression_12878(self):
# Regression test for #12878 #################################################
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.'])
def test_formset_iteration(self):
# Regression tests for #16455 -- formset instances are iterable
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# confirm iterated formset yields formset.forms
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# confirm indexing of formset
self.assertEqual(formset[0], forms[0])
try:
formset[3]
self.fail('Requesting an invalid formset index should raise an exception')
except IndexError:
pass
        # Formsets can override the default iteration order.
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super(BaseReverseFormSet, self).__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# confirm that __iter__ modifies rendering order
# compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""
Formsets with no forms should still evaluate as true.
Regression test for #15722
"""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset should also work with SplitDateTimeField(initial=datetime.datetime.now).
Regression test for #18709.
"""
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-when_0': '1904-06-16',
'form-0-when_1': '15:51:33',
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
        # Regression test for #16479 -- formset forms were using ErrorList instead of the supplied error_class.
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
# Regression tests for #18574 -- make sure formsets call
# is_valid() on each form.
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super(AnotherChoice, self).is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1', # number of forms rendered
'choices-INITIAL_FORMS': '0', # number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# But we still only instantiate 3 forms
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
# Regression test for #11160
# If non_form_errors() is called without calling is_valid() first,
# it should ensure that full_clean() is called.
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()),
['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True,
can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FormsetAsFooTests(TestCase):
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(formset.as_table(), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" value="100" /></td></tr>""")
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(formset.as_p(), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="number" name="choices-0-votes" value="100" /></p>""")
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(formset.as_ul(), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MIN_NUM_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>""")
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(TestCase):
def test_no_data_raises_validation_error(self):
with self.assertRaises(ValidationError):
ArticleFormSet({}).is_valid()
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = []
empty_forms.append(unbound_formset.empty_form)
empty_forms.append(bound_formset.empty_form)
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(TestCase):
def test_empty_formset_is_valid(self):
"""Test that an empty formset still calls clean()"""
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'}, prefix="form")
formset2 = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'}, prefix="form")
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Make sure media is available on empty formset, refs #19545"""
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""Make sure `is_multipart()` works with empty formset, refs #19545"""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
|
|
'''
StreamingExtensions is a plug-in to both GUI menu and command line/web service
that provides an alternative approach to big instance documents without building a DOM, to save
memory footprint. lxml iterparse is used to parse the big instance. ModelObjects are specialized by features
for efficiency and to avoid dependency on an underlying DOM.
(Note that this module is based on iterparse; the module under the installation plugin directory is much faster.)
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import io, sys, os, time
from decimal import Decimal, InvalidOperation
from lxml import etree
from collections import defaultdict
from arelle import XbrlConst, XmlUtil, XmlValidate, ValidateXbrlDimensions
from arelle.ModelDocument import ModelDocument, Type
from arelle.ModelObject import ModelObject
from arelle.ModelObjectFactory import parser
from arelle.ModelValue import QName
from arelle.ModelInstanceObject import ModelContext, ModelFact, ModelUnit
from arelle.Validate import Validate
_streamingExtensionsValidate = False
_streamingExtensionsCheck = False
def precedingProcessingInstruction(elt, target):
pi = elt.getprevious()
while pi is not None:
if isinstance(pi, etree._ProcessingInstruction) and pi.target == target:
return pi
pi = pi.getprevious()
return None
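# The loader below expects an "xbrl-streamable-instance" processing instruction
# placed inside the xbrli:xbrl element, before its first child element. An
# illustrative (not normative) example, based on the attributes this module
# reads (version, contextBuffer, unitBuffer, footnoteBuffer):
#
#     <?xbrl-streamable-instance version="1.0" contextBuffer="10"
#                                unitBuffer="10" footnoteBuffer="INF"?>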
def streamingExtensionsLoader(modelXbrl, mappedUri, filepath):
    # check if big instance and has header with an initial incomplete tree walk (just 2 elements)
def logSyntaxErrors(parsercontext):
for error in parsercontext.error_log:
modelXbrl.error("xmlSchema:syntax",
_("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
modelObject=modelDocument, fileName=os.path.basename(filepath),
error=error.message, line=error.line, column=error.column, sourceAction="streaming")
#### note: written for iterparse of lxml prior to version 3.3, otherwise rewrite to use XmlPullParser ###
#### note: iterparse wants a binary file, but file is text mode
_file, = modelXbrl.fileSource.file(filepath, binary=True)
startedAt = time.time()
modelXbrl.profileActivity()
parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
foundInstance = False
foundErrors = False
streamingAspects = None
numRootFacts1 = 0
numElts = 0
elt = None
for event, elt in parsercontext:
if event == "start":
if elt.getparent() is not None:
if elt.getparent().tag == "{http://www.xbrl.org/2003/instance}xbrl":
if not foundInstance:
foundInstance = True
pi = precedingProcessingInstruction(elt, "xbrl-streamable-instance")
if pi is None:
break
else:
streamingAspects = dict(pi.attrib.copy())
if not elt.tag.startswith("{http://www.xbrl.org/"):
numRootFacts1 += 1
if numRootFacts1 % 1000 == 0:
modelXbrl.profileActivity("... streaming tree check", minTimeToShow=20.0)
elif not foundInstance:
break
elif elt.tag == "{http://www.xbrl.org/2003/instance}xbrl" and precedingProcessingInstruction(elt, "xbrl-streamable-instance") is not None:
modelXbrl.error("streamingExtensions:headerMisplaced",
_("Header is misplaced: %(error)s, must follow xbrli:xbrl element"),
modelObject=elt)
elif event == "end":
elt.clear()
numElts += 1
if numElts % 1000 == 0 and elt.getparent() is not None:
while elt.getprevious() is not None and elt.getparent() is not None:
del elt.getparent()[0]
if elt is not None:
elt.clear()
_file.seek(0,io.SEEK_SET) # allow reparsing
if not foundInstance or streamingAspects is None:
del elt, parsercontext
_file.close()
return None
modelXbrl.profileStat(_("streaming tree check"), time.time() - startedAt)
startedAt = time.time()
try:
version = Decimal(streamingAspects.get("version"))
if int(version) != 1:
modelXbrl.error("streamingExtensions:unsupportedVersion",
_("Streaming version %(version)s, major version number must be 1"),
modelObject=elt, version=version)
foundErrors = True
except (InvalidOperation, OverflowError):
modelXbrl.error("streamingExtensions:versionError",
_("Version %(version)s, number must be 1.n"),
modelObject=elt, version=streamingAspects.get("version", "(none)"))
foundErrors = True
for bufAspect in ("contextBuffer", "unitBuffer", "footnoteBuffer"):
try:
bufLimit = Decimal(streamingAspects.get(bufAspect, "INF"))
if bufLimit < 1 or (bufLimit.is_finite() and bufLimit % 1 != 0):
raise InvalidOperation
elif bufAspect == "contextBuffer":
contextBufferLimit = bufLimit
elif bufAspect == "unitBuffer":
unitBufferLimit = bufLimit
elif bufAspect == "footnoteBuffer":
footnoteBufferLimit = bufLimit
except InvalidOperation:
modelXbrl.error("streamingExtensions:valueError",
_("Streaming %(attrib)s %(value)s, number must be a positive integer or INF"),
modelObject=elt, attrib=bufAspect, value=streamingAspects.get(bufAspect))
foundErrors = True
if parsercontext.error_log:
foundErrors = True
logSyntaxErrors(parsercontext)
if foundErrors:
_file.close()
return None
parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
_parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,filepath)
eltMdlObjs = {}
beforeInstanceStream = True
validator = None
contextBuffer = []
unitBuffer = []
footnoteBuffer = []
factBuffer = []
numFacts = numRootFacts2 = 1
for event, elt in parsercontext:
if event == "start":
mdlObj = _parser.makeelement(elt.tag, attrib=elt.attrib, nsmap=elt.nsmap)
mdlObj.sourceline = elt.sourceline
eltMdlObjs[elt] = mdlObj
if elt.getparent() is None:
modelDocument = ModelDocument(modelXbrl, Type.INSTANCE, mappedUri, filepath, etree.ElementTree(mdlObj))
modelDocument.xmlRootElement = mdlObj
modelXbrl.modelDocument = modelDocument # needed for incremental validation
mdlObj.init(modelDocument)
modelXbrl.info("streamingExtensions:streaming",
_("Stream processing this instance."),
modelObject = modelDocument)
else:
eltMdlObjs[elt.getparent()].append(mdlObj)
mdlObj._init()
ns = mdlObj.namespaceURI
ln = mdlObj.localName
if (beforeInstanceStream and (
(ns == XbrlConst.link and ln not in ("schemaRef", "linkbaseRef")) or
(ns == XbrlConst.xbrli and ln in ("context", "unit")) or
(ns not in (XbrlConst.link, XbrlConst.xbrli)))):
beforeInstanceStream = False
if _streamingExtensionsValidate:
validator = Validate(modelXbrl)
validator.instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters())
else: # need default dimensions
ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
mdlObj = None # deref
elif event == "end":
mdlObj = eltMdlObjs.pop(elt)
if elt.text: # text available after child nodes processed
mdlObj.text = elt.text
ns = mdlObj.namespaceURI
ln = mdlObj.localName
parentMdlObj = mdlObj.getparent()
if ns == XbrlConst.xbrli:
if ln == "context":
if mdlObj.get("sticky"):
del mdlObj.attrib["sticky"]
modelDocument.contextDiscover(mdlObj)
else:
if _streamingExtensionsValidate and len(contextBuffer) >= contextBufferLimit:
# drop before adding as dropped may have same id as added
cntx = contextBuffer.pop(0)
dropContext(modelXbrl, cntx)
del parentMdlObj[parentMdlObj.index(cntx)]
cntx = None
modelDocument.contextDiscover(mdlObj)
if contextBufferLimit.is_finite():
contextBuffer.append(mdlObj)
if _streamingExtensionsValidate:
contextsToCheck = (mdlObj,)
validator.instValidator.checkContexts(contextsToCheck)
if modelXbrl.hasXDT:
validator.instValidator.checkContextsDimensions(contextsToCheck)
del contextsToCheck # dereference
elif ln == "unit":
if _streamingExtensionsValidate and len(unitBuffer) >= unitBufferLimit:
# drop before adding as dropped may have same id as added
unit = unitBuffer.pop(0)
dropUnit(modelXbrl, unit)
del parentMdlObj[parentMdlObj.index(unit)]
unit = None
modelDocument.unitDiscover(mdlObj)
if unitBufferLimit.is_finite():
unitBuffer.append(mdlObj)
if _streamingExtensionsValidate:
validator.instValidator.checkUnits( (mdlObj,) )
elif ln == "xbrl": # end of document
# check remaining footnote refs
for footnoteLink in footnoteBuffer:
checkFootnoteHrefs(modelXbrl, footnoteLink)
elt.clear()
elif ns == XbrlConst.link:
if ln in ("schemaRef", "linkbaseRef"):
modelDocument.discoverHref(mdlObj)
elif ln in ("roleRef", "arcroleRef"):
modelDocument.linkbaseDiscover((mdlObj,), inInstance=True)
elif ln == "footnoteLink":
footnoteLinks = (mdlObj,)
modelDocument.linkbaseDiscover(footnoteLinks, inInstance=True)
if footnoteBufferLimit.is_finite():
footnoteBuffer.append(mdlObj)
if _streamingExtensionsValidate:
validator.instValidator.checkLinks(footnoteLinks)
if len(footnoteBuffer) > footnoteBufferLimit:
# check that hrefObjects for locators were all satisfied
# drop before addition as dropped may have same id as added
footnoteLink = footnoteBuffer.pop(0)
checkFootnoteHrefs(modelXbrl, footnoteLink)
dropFootnoteLink(modelXbrl, footnoteLink)
del parentMdlObj[parentMdlObj.index(footnoteLink)]
footnoteLink = None
footnoteLinks = None
elt.clear()
elif parentMdlObj.qname == XbrlConst.qnXbrliXbrl:
numRootFacts2 += 1
modelDocument.factDiscover(mdlObj, modelXbrl.facts)
XmlValidate.validate(modelXbrl, mdlObj)
if _streamingExtensionsValidate:
factsToCheck = (mdlObj,)
validator.instValidator.checkFacts(factsToCheck)
if modelXbrl.hasXDT:
validator.instValidator.checkFactsDimensions(factsToCheck)
del factsToCheck
dropFact(modelXbrl, mdlObj, modelXbrl.facts)
del parentMdlObj[parentMdlObj.index(mdlObj)]
if numRootFacts2 % 1000 == 0:
modelXbrl.profileActivity("... streaming fact {0} of {1} {2:.2f}%".format(numRootFacts2, numRootFacts1, 100.0 * numRootFacts2 / numRootFacts1),
minTimeToShow=20.0)
# get rid of root element from iterparse's tree
elt.clear()
while elt.getprevious() is not None: # cleans up any prior siblings
del elt.getparent()[0]
mdlObj = None # deref
logSyntaxErrors(parsercontext)
del parsercontext
if validator is not None:
validator.close()
_file.close()
modelXbrl.profileStat(_("streaming complete"), time.time() - startedAt)
return modelDocument
def checkFootnoteHrefs(modelXbrl, footnoteLink):
for locElt in footnoteLink.iterchildren(tag="{http://www.xbrl.org/2003/linkbase}loc"):
for hrefElt, doc, id in footnoteLink.modelDocument.hrefObjects:
if locElt == hrefElt and id not in footnoteLink.modelDocument.idObjects:
modelXbrl.error("streamingExtensions:footnoteId",
_("Footnote id %(id)s not matched to fact in buffered region"),
modelObject=footnoteLink, id=id)
def dropContext(modelXbrl, cntx):
del modelXbrl.contexts[cntx.id]
dropObject(modelXbrl, cntx)
def dropUnit(modelXbrl, unit):
del modelXbrl.units[unit.id]
dropObject(modelXbrl, unit)
def dropFootnoteLink(modelXbrl, footnoteLink):
for baseSet in modelXbrl.baseSets.values():
if footnoteLink in baseSet:
baseSet.remove(footnoteLink)
dropObject(modelXbrl, footnoteLink)
def dropFact(modelXbrl, fact, facts):
while fact.modelTupleFacts:
dropFact(modelXbrl, fact.modelTupleFacts[0], fact.modelTupleFacts)
modelXbrl.factsInInstance.discard(fact)
facts.remove(fact)
modelXbrl.modelObjects[fact.objectIndex] = None # objects found by index, can't remove position from list
fact.modelDocument.modelObjects.remove(fact)
fact.clear()
def dropObject(modelXbrl, mdlObj):
for childObj in mdlObj.iterchildren():
dropObject(modelXbrl, childObj)
if mdlObj.qname == XbrlConst.qnLinkLoc:
hrefs = mdlObj.modelDocument.hrefObjects
removedHrefs = [i for i, hrefObj in enumerate(hrefs) if mdlObj == hrefObj[0]]
for i in removedHrefs:
del hrefs[i]
modelXbrl.modelObjects[mdlObj.objectIndex] = None # objects found by index, can't remove position from list
mdlObj.modelDocument.modelObjects.remove(mdlObj)
mdlObj.modelDocument.idObjects.pop(mdlObj.id, None)
mdlObj.clear()
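# The drop* helpers above undo the bookkeeping done by the *Discover calls so
# that objects scrolled out of the configured buffers can be garbage collected:
# they are removed from the model's lookup tables, their slot in
# modelXbrl.modelObjects is set to None (later objects are located by index, so
# positions must not shift), and the underlying lxml elements are cleared.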
def streamingOptionsExtender(parser):
parser.add_option("--check-streaming",
action="store_true",
dest="check_streaming",
help=_('Check streamability of instance document.'))
def streamingExtensionsSetup(self, options, **kwargs):
global _streamingExtensionsCheck, _streamingExtensionsValidate
_streamingExtensionsCheck = getattr(options, 'check_streaming', False)
_streamingExtensionsValidate = options.validate
if options.validate:
options.validate = False # prevent cmdLine calling validation
'''
Do not use _( ) in pluginInfo itself (it is applied later, after loading).
'''
__pluginInfo__ = {
'name': 'Streaming Extensions Loader',
'version': '0.9',
'description': "This plug-in loads big XBRL instances without building a DOM in memory. "
"lxml iterparse parses XBRL directly into an object model without a DOM. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2014 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrCmdLine.Options': streamingOptionsExtender,
'CntlrCmdLine.Utility.Run': streamingExtensionsSetup,
'ModelDocument.PullLoader': streamingExtensionsLoader,
}
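# Illustrative usage sketch (not part of the plug-in itself; the plug-in and
# instance file names are hypothetical). Once this module is registered as an
# Arelle plug-in, streaming loading and the extra option declared above would
# typically be driven from the command line roughly like:
#
#   arelleCmdLine --plugins streamingExtensions.py --check-streaming \
#                 --file big-instance.xbrl --validate
#
# --check-streaming sets the check_streaming option added above, and --validate
# is intercepted by streamingExtensionsSetup so that validation runs
# incrementally while streaming instead of after the whole instance is loaded.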
|
|
import argparse
from pathlib import Path
import sys
import os
from platform import platform
import traceback
import astunparse
from mochi import __version__, IS_PYPY, GE_PYTHON_34, GE_PYTHON_33
from mochi.parser import lex, REPL_CONTINUE, ParsingError
from .builtins import current_error_port, eval_sexp_str, eval_tokens
from .global_env import global_env
from .translation import syntax_table, global_scope, translator, ast2py
MONKEY_PATCH_ENV = 'import_global_env_and_monkey_patch.mochi'
GLOBAL_ENV = 'import_global_env.mochi'
def output_code(code):
import marshal
marshal.dump(code, sys.stdout.buffer)
def output_pyc(code, buffer=sys.stdout.buffer):
import marshal
import struct
import time
if GE_PYTHON_34:
from importlib.util import MAGIC_NUMBER
else:
import imp
MAGIC_NUMBER = imp.get_magic()
buffer.write(MAGIC_NUMBER)
timestamp = struct.pack('i', int(time.time()))
if GE_PYTHON_33:
buffer.write(timestamp)
buffer.write(b'0' * 4)
else:
buffer.write(timestamp)
marshal.dump(code, buffer)
buffer.flush()
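# A minimal usage sketch (illustrative only; the file names are hypothetical):
#
#   code = compile_file('hello.mochi', optimize=2)
#   with open('hello.pyc', 'wb') as out:
#       output_pyc(code, buffer=out)
#
# output_pyc writes the interpreter's magic number, a 4-byte timestamp and, on
# Python 3.3+, four placeholder bytes before the marshalled code object, i.e.
# the magic/timestamp header that older CPython versions expect at the start
# of a .pyc file.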
def compile_file(src_path, optimize=-1, show_tokens=False):
# binding_name_set_stack[0].update(global_env.keys())
py_ast = translator.translate_file(src_path, show_tokens=show_tokens)
return compile(py_ast, src_path, 'exec', optimize=optimize)
def load_file(path, env):
return exec(compile_file(path), env)
def execute_compiled_file(path):
import marshal
orig_main = sys.modules['__main__']
sys.modules['__main__'] = global_env
try:
with open(path, 'rb') as compiled_file:
return exec(marshal.load(compiled_file), global_env)
finally:
sys.modules['__main__'] = orig_main
def interact(show_tokens=False):
try:
import readline
except ImportError:
pass
sys.modules['__main__'] = global_env
while True:
buffer = ''
continuation_flag = False
tokens = []
while True:
try:
if continuation_flag:
s = input('... ')
if s == '\n':
continue
buffer = buffer + '\n' + s
else:
s = input('>>> ')
if s == '\n':
continue
buffer = s
except EOFError:
print()
sys.exit()
try:
lexer = lex(buffer, repl_mode=True, debug=show_tokens)
for last in lexer:
tokens.append(last)
if len(tokens) == 0:
buffer = ''
continue
if last is REPL_CONTINUE or last.name == 'COLON' or last.name == 'THINARROW':
continuation_flag = True
tokens = []
continue
else:
break
except Exception:
traceback.print_exc(file=current_error_port)
continuation_flag = False
buffer = ''
continue
try:
eval_tokens(tokens)
except ParsingError as e:
print(e, file=current_error_port)
except Exception:
traceback.print_exc(file=current_error_port)
def init(no_monkeypatch=False):
if hasattr(init, '__called') and init.__called:
return
else:
init.__called = True
import eventlet
def eval_from_file(path_obj):
"""Evaluate sexpression in given file.
"""
with path_obj.open() as fobj:
expr = fobj.read()
eval_sexp_str(expr)
if no_monkeypatch:
pass
elif (not GE_PYTHON_33) or platform().lower().startswith('win'):
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
expr_path = Path(__file__).absolute().parents[1] / 'sexpressions'
eval_from_file(expr_path / 'main.expr')
if not IS_PYPY:
eval_from_file(expr_path / 'cpython.expr')
else:
eval_from_file(expr_path / 'pypy.expr')
eval_from_file(expr_path / 'del_hidden.expr')
for syntax in {'for', 'each', 'while', 'break', 'continue'}:
del syntax_table[syntax]
del global_env[syntax]
del global_scope[syntax]
sys.path.append(os.getcwd())
from mochi.utils.importer import set_importer
set_importer()
def _pyc_compile(in_file_name, env, out_file_name, show_tokens=False):
"""Compile a Mochi file into a Python bytecode file.
"""
if not out_file_name:
out_file = sys.stdout.buffer
else:
out_file = open(out_file_name, 'wb')
target_ast = translator.translate_file(in_file_name, show_tokens=show_tokens)
import_env_file = Path(__file__).absolute().parents[0] / env
import_env_ast = translator.translate_file(import_env_file.as_posix())
target_ast.body = import_env_ast.body + target_ast.body
output_pyc(compile(target_ast, in_file_name, 'exec', optimize=2),
buffer=out_file)
def pyc_compile_monkeypatch(in_file_name, out_file_name=None, show_tokens=False):
env = 'import_global_env_and_monkey_patch.mochi'
_pyc_compile(in_file_name, env, out_file_name, show_tokens=show_tokens)
def pyc_compile_no_monkeypatch(in_file_name, out_file_name=None, show_tokens=False):
env = 'import_global_env.mochi'
_pyc_compile(in_file_name, env, out_file_name, show_tokens=show_tokens)
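# Example (illustrative; the file names are hypothetical). Both wrappers
# delegate to _pyc_compile, which prepends the chosen bootstrap environment to
# the translated module before byte-compiling it, so
#
#   pyc_compile_no_monkeypatch('fib.mochi', 'fib.pyc')
#
# writes a .pyc whose code first imports the Mochi global environment and then
# runs the translated program.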
def make_py_source_file(mochi_file_name, python_file_name=None, mochi_env='',
add_init=False, show_tokens=False):
"""Generate Python source code from Mochi code.
"""
ast = translator.translate_file(mochi_file_name, show_tokens=show_tokens)
if mochi_env:
env_file = Path(__file__).absolute().parents[0] / mochi_env
with open(env_file.as_posix()) as fobj:
mochi_env = fobj.read()
py_source = clean_source(ast2py(ast, mochi_env, add_init=add_init))
if not python_file_name:
print(py_source)
else:
with open(python_file_name, 'w') as fobj:
fobj.write(py_source)
def pprint_ast(mochi_file_name, ast_file_name=None, show_tokens=False):
"""Generate a nicly formatted AST from Mochi code.
"""
ast = translator.translate_file(mochi_file_name, show_tokens=show_tokens)
py_source = astunparse.dump(ast)
if not ast_file_name:
print(py_source)
else:
with open(ast_file_name, 'w') as fobj:
fobj.write(py_source)
def clean_source(source):
# TODO: Fix AST generation so this function is not needed.
"""Dirty cleaning of dirty source."""
# replace '$x' with 'arg_x' for x = 1, 2, 3 ... 9
if '$' in source:
for number in range(1, 10):
source = source.replace('${}'.format(number),
'arg_{}'.format(number))
# remove bare `try` lines that serve no purpose and would break the syntax
if 'try' in source:
lines = source.splitlines()
new_lines = [line for line in lines if not line.strip() == 'try']
source = '\n'.join(new_lines)
if '|>(' in source:
source = source.replace('|>(', 'bind(')
val = 'val('
if val in source:
lines = source.splitlines()
new_lines = []
for line in lines:
if line.strip().startswith(val):
spaces = int(line.index(val)) * ' '
name, value = line.split(val)[1].split(',', 1)
assign = '{} = {}'.format(name, value[:-1])
new_lines.append(spaces + assign)
else:
new_lines.append(line)
source = '\n'.join(new_lines)
# TODO: fix `&`
#if '&' in source:
# source = source.replace('&', '*_rest')
return source
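# A rough before/after sketch of what clean_source does (the input lines are
# hypothetical output of the translator):
#
#   print($1)         ->   print(arg_1)
#   val(x, 1 + 2)     ->   x = 1 + 2
#   f(|>(data, g))    ->   f(bind(data, g))
#
# plus removal of bare 'try' lines that the generated source sometimes contains.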
def parse_args():
arg_parser = argparse.ArgumentParser(
description='Mochi is a functional programming language.')
arg_parser.add_argument('-v', '--version', action='version',
version=__version__)
arg_parser.add_argument('-c', '--compile', action='store_true',
help='Show marshalled code.')
arg_parser.add_argument('-pyc', '--pyc-compile', action='store_true',
help='Generate Python bytecode from Mochi file.')
arg_parser.add_argument('-py', '--py-source', action='store_true',
help='Generate Python source code from Mochi file.')
arg_parser.add_argument('-a', '--ast', action='store_true',
help='Generate AST from Mochi file.')
arg_parser.add_argument('-o', '--outfile', nargs='?', type=str,
help='Name of output file.')
arg_parser.add_argument('-no-mp', '--no-monkeypatch',
action='store_true')
arg_parser.add_argument('-init', '--add-init-code', action='store_true',
help='Add Mochi init code to Python source code '
'files. This allows running the generated '
'file from the command line with Python.')
arg_parser.add_argument('-e', '--execute-compiled-file',
action='store_true')
arg_parser.add_argument('file', nargs='?', type=str)
arg_parser.add_argument('--show-tokens', dest='tokens',
help='Shows the results of the tokenizing step.',
action='store_true')
return arg_parser.parse_args()
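# Typical invocations (illustrative; this assumes the installed console script
# is called `mochi`, and the file names are hypothetical):
#
#   mochi program.mochi                           # compile and run a source file
#   mochi                                         # start the interactive REPL
#   mochi -pyc program.mochi -o program.pyc       # byte-compile to a .pyc file
#   mochi -py program.mochi -o program.py -init   # emit runnable Python source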
def main():
args = parse_args()
init(args.no_monkeypatch)
if args.file:
try:
if args.no_monkeypatch:
env = GLOBAL_ENV
else:
env = MONKEY_PATCH_ENV
if args.compile:
output_code(compile_file(args.file,
optimize=2,
show_tokens=args.tokens))
elif args.execute_compiled_file:
execute_compiled_file(args.file)
elif args.pyc_compile:
if args.no_monkeypatch:
pyc_compile_no_monkeypatch(in_file_name=args.file,
out_file_name=args.outfile,
show_tokens=args.tokens)
else:
pyc_compile_monkeypatch(in_file_name=args.file,
out_file_name=args.outfile,
show_tokens=args.tokens)
elif args.py_source:
make_py_source_file(mochi_file_name=args.file,
python_file_name=args.outfile,
mochi_env=env,
show_tokens=args.tokens,
add_init=args.add_init_code)
elif args.ast:
pprint_ast(mochi_file_name=args.file,
ast_file_name=args.outfile, show_tokens=args.tokens)
else:
sys.modules['__main__'] = global_env
load_file(args.file, global_env)
except ParsingError as e:
print(e, file=sys.stderr)
except Exception:
traceback.print_exc(file=sys.stderr)
sys.exit(0)
else:
interact(args.tokens)
if __name__ == '__main__':
main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import server_lib
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import compat
class IteratorTest(test.TestCase, parameterized.TestCase):
def testNoGradients(self):
component = constant_op.constant([1.])
side = constant_op.constant(0.)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset.make_one_shot_iterator().get_next()
self.assertIsNone(gradients_impl.gradients(value, component)[0])
self.assertIsNone(gradients_impl.gradients(value, side)[0])
self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
def testCapturingStateInOneShotRaisesException(self):
var = variables.Variable(37.0, name="myvar")
dataset = (
dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
.map(lambda x: x + var))
with self.assertRaisesRegexp(
ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
"datasets that capture stateful objects.+myvar"):
dataset.make_one_shot_iterator()
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14).make_one_shot_iterator())
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14).make_one_shot_iterator())
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for j in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % j
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(num_threads, len(results))
self.assertEqual(num_threads - 1,
len([None for r in results if r is None]))
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(
array_ops.check_numerics(
constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
with self.cached_session() as sess:
def consumer_thread():
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
def testSimpleSharedResource(self):
components = (np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64))
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = (
dataset_ops.Dataset.from_tensors(components)
.map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
shared_name="shared_iterator"))
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = iterator_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = (
dataset_ops.Dataset.from_tensors(components)
.make_initializable_iterator())
get_next = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(dataset_3.output_types,
[None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(dataset_3.output_types, iterator.output_types)
self.assertEqual(dataset_4.output_types, iterator.output_types)
self.assertEqual([None], iterator.output_shapes.as_list())
with self.cached_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int32),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int64),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_4 = dataset_4.make_one_shot_iterator()
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
next_element = feedable_iterator.get_next()
self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
self.assertEqual([], feedable_iterator.output_shapes)
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
def testIteratorStringHandleFuture(self):
with forward_compat.forward_compatibility_horizon(2018, 8, 4):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_4 = dataset_4.make_one_shot_iterator()
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_3.output_types, dataset_3.output_shapes)
next_element = feedable_iterator.get_next()
self.assertEqual(dataset_3.output_types, feedable_iterator.output_types)
self.assertEqual(dataset_4.output_types, feedable_iterator.output_types)
self.assertEqual([], feedable_iterator.output_shapes)
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
def testIteratorStringHandleReuseTensorObject(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
one_shot_iterator = dataset.make_one_shot_iterator()
initializable_iterator = dataset.make_initializable_iterator()
structure_iterator = iterator_ops.Iterator.from_structure(
dataset.output_types)
created_ops = len(ops.get_default_graph().get_operations())
self.assertIs(one_shot_iterator.string_handle(),
one_shot_iterator.string_handle())
self.assertIs(initializable_iterator.string_handle(),
initializable_iterator.string_handle())
self.assertIs(structure_iterator.string_handle(),
structure_iterator.string_handle())
# Assert that getting the (default) string handle creates no ops.
self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
# Specifying an explicit name will create a new op.
handle_with_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo", handle_with_name.op.name)
self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
handle_with_same_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo_1", handle_with_same_name.op.name)
self.assertIsNot(handle_with_name, handle_with_same_name)
def testIteratorStringHandleError(self):
dataset_int_scalar = (
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.cached_session() as sess:
handle_int_scalar = sess.run(
dataset_int_scalar.make_one_shot_iterator().string_handle())
handle_float_vector = sess.run(
dataset_float_vector.make_one_shot_iterator().string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 3
with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_3.output_types, dataset_3.output_shapes)
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.session(config=worker_config) as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [1])
# Fails when target is cpu:2 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
})
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
s1 = server_lib.Server.create_local_server()
s2 = server_lib.Server.create_local_server()
s3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
workers = cluster_def.job.add()
workers.name = "worker"
workers.tasks[0] = s1.target[len("grpc://"):]
workers.tasks[1] = s2.target[len("grpc://"):]
client = cluster_def.job.add()
client.name = "client"
client.tasks[0] = s3.target[len("grpc://"):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
worker_devices = [
"/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
]
itr_handles = []
for device in worker_devices:
with ops.device(device):
src = dataset_ops.Dataset.from_tensor_slices([device])
itr = src.make_one_shot_iterator()
itr_handles.append(itr.string_handle())
targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)
@function.Defun(dtypes.string)
def loading_func(h):
remote_itr = iterator_ops.Iterator.from_string_handle(
h, itr.output_types, itr.output_shapes)
return remote_itr.get_next()
def map_fn(target, handle):
return functional_ops.remote_call(
args=[handle], Tout=[dtypes.string], f=loading_func, target=target)
with ops.device("/job:client"):
client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
itr = client_dataset.make_initializable_iterator()
n = itr.get_next()
with session.Session(s3.target, config=config) as sess:
sess.run(itr.initializer)
expected_values = worker_devices
for expected in expected_values:
self.assertEqual((compat.as_bytes(expected),), sess.run(n))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_3.make_one_shot_iterator()
iterator_3_handle = iterator_3.string_handle()
def _encode_raw(byte_array):
return bytes(bytearray(byte_array))
@function.Defun(dtypes.uint8)
def _remote_fn(h):
handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, dataset_3.output_types, dataset_3.output_shapes)
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
iterator_3_handle_uint8 = parsing_ops.decode_raw(
bytes=iterator_3_handle, out_type=dtypes.uint8)
remote_op = functional_ops.remote_call(
args=[iterator_3_handle_uint8],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.cached_session() as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [1])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
def testIncorrectIteratorRestore(self):
def _path():
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
_path(), parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(_path()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def _build_range_dataset_graph():
start = 1
stop = 10
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
def _build_reader_dataset_graph():
filenames = ["test"] # Does not exist but we don't care in this test.
iterator = readers.FixedLengthRecordDataset(
filenames, 1, 0, 0).make_initializable_iterator()
init_op = iterator.initializer
get_next_op = iterator.get_next()
save_op = _save_op(iterator._iterator_resource)
restore_op = _restore_op(iterator._iterator_resource)
return init_op, get_next_op, save_op, restore_op
# Saving iterator for RangeDataset graph.
with ops.Graph().as_default() as g:
init_op, _, save_op, _ = _build_range_dataset_graph()
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(save_op)
# Attempt to restore the saved iterator into an IteratorResource of
# incompatible type. An iterator of RangeDataset has output type int64,
# while an iterator of FixedLengthRecordDataset has output type string.
# So an InvalidArgumentError should be raised by
# IteratorResource::set_iterator.
with ops.Graph().as_default() as g:
_, _, _, restore_op = _build_reader_dataset_graph()
with self.session(graph=g) as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(restore_op)
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.Dataset.range(10).make_one_shot_iterator()
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertIn(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
def testEagerIteratorAsync(self):
with context.eager_mode(), context.execution_mode(context.ASYNC):
val = 0
dataset = dataset_ops.Dataset.range(10)
for foo in dataset:
self.assertEqual(val, foo.numpy())
val += 1
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
structure.TensorStructure(dtypes.float32, []),
ops.Tensor, dtypes.float32, []),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]),
structure.SparseTensorStructure(dtypes.int32, [1]),
sparse_tensor.SparseTensor, dtypes.int32, [1]),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.TensorStructure(dtypes.string, [1]),
structure.TensorStructure(dtypes.string, []))}),
{"a": ops.Tensor, "b": (ops.Tensor, ops.Tensor)},
{"a": dtypes.float32, "b": (dtypes.string, dtypes.string)},
{"a": [], "b": ([1], [])}),
)
def testIteratorStructure(self, tf_value_fn, expected_element_structure,
expected_output_classes, expected_output_types,
expected_output_shapes):
tf_value = tf_value_fn()
iterator = dataset_ops.Dataset.from_tensors(
tf_value).make_one_shot_iterator()
self.assertTrue(expected_element_structure.is_compatible_with(
iterator._element_structure))
self.assertTrue(iterator._element_structure.is_compatible_with(
expected_element_structure))
self.assertEqual(expected_output_classes, iterator.output_classes)
self.assertEqual(expected_output_types, iterator.output_types)
self.assertEqual(expected_output_shapes, iterator.output_shapes)
def testIteratorGetNextName(self):
with ops.Graph().as_default():
iterator = dataset_ops.Dataset.from_tensors(37.0).make_one_shot_iterator()
next_element = iterator.get_next(name="overridden_name")
self.assertEqual("overridden_name", next_element.op.name)
class IteratorCheckpointingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreOneShotIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(
math_ops.square).batch(2)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator.get_next())
checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual([1, 4], get_next())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([9, 16], get_next())
self.assertAllEqual([25, 36], get_next())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual([9, 16], get_next())
self.assertAllEqual([25, 36], get_next())
with self.assertRaises(errors.OutOfRangeError):
get_next()
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreMultipleIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.from_tensor_slices(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
iterator_1 = dataset.make_one_shot_iterator()
get_next_1 = iterator_1.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator_1.get_next())
iterator_2 = dataset.make_one_shot_iterator()
get_next_2 = iterator_2.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator_2.get_next())
dataset_2 = dataset_ops.Dataset.range(10)
iterator_3 = dataset_2.make_one_shot_iterator()
get_next_3 = iterator_3.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator_3.get_next())
checkpoint = checkpointable_utils.Checkpoint(
iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
self.assertAllEqual([1, 4], get_next_1())
self.assertAllEqual(0, get_next_3())
self.assertAllEqual(1, get_next_3())
self.assertAllEqual(2, get_next_3())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([1, 4], get_next_2())
self.assertAllEqual([9, 16], get_next_2())
self.assertAllEqual(3, get_next_3())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual([9, 16], get_next_1())
self.assertAllEqual([1, 4], get_next_2())
self.assertAllEqual(3, get_next_3())
@test_util.run_in_graph_and_eager_modes
def testRestoreExhaustedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.range(3)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next if context.executing_eagerly(
) else functools.partial(self.evaluate, iterator.get_next())
checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual(0, get_next())
self.assertAllEqual(1, get_next())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual(2, get_next())
checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual(2, get_next())
save_path = checkpoint.save(checkpoint_prefix)
checkpoint.restore(save_path).run_restore_ops()
with self.assertRaises(errors.OutOfRangeError):
get_next()
def testRestoreInReconstructedIteratorInitializable(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dataset = dataset_ops.Dataset.range(10)
iterator = dataset.make_initializable_iterator()
get_next = iterator.get_next()
checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
for i in range(5):
with self.cached_session() as sess:
checkpoint.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory)).initialize_or_restore(sess)
for j in range(2):
self.assertEqual(i * 2 + j, sess.run(get_next))
checkpoint.save(file_prefix=checkpoint_prefix)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple language model."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
from lamb import utils
from lamb.cell import build_cell
from lamb.dropout import Dropout
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class LM(object):
"""A language model."""
def __init__(self, config, _model_only=False):
"""A language model.
Args:
config: A dictionary with the configuration options (see README.md).
_model_only: For internal use only.
"""
if not _model_only:
logging.info('Finalized parameters to follow.')
logging.info('%s', str(config))
logging.info('Building model.')
self._build_model(config)
logging.info('Building loss.')
self._build_loss(config)
self._check_budget(config)
self.config = config
else:
self._build_model(config)
@staticmethod
def num_params(config):
g = tf.Graph()
with g.as_default() as g:
# Speed graph creation up by only expanding the RNN to one step. This
# graph will be discarded anyway.
config.max_time_steps = 1
try:
LM(config, _model_only=True)
except (tf.errors.ResourceExhaustedError,
# Some OOM conditions turn into internal errors.
tf.errors.InternalError):
return None
n = utils.count_trainables()
return n
def _build_model(self, config):
self.global_step_var = tf.Variable(
tf.zeros([], tf.int64), name='global_step', trainable=False)
self.learning_rate = tf.placeholder(
tf.float32, shape=[], name='learning_rate')
## Input variables
self.num_samples = tf.placeholder_with_default(
1, shape=[], name='num_samples')
# For MT, this is the source language text. For LM, this is not used.
if config.conditioning_separator:
assert config.episodic, 'conditioning and non-episodic do not mix.'
self.conditioning = tf.placeholder(
dtype=tf.int64, shape=[config.max_time_steps, None],
name='conditioning')
self.conditioning_len = tf.placeholder(dtype=tf.int64, shape=[None],
name='conditioning_len')
# For plain LM, this is the input text. For MT this is the target language
# text.
self.source = tf.placeholder(
dtype=tf.int64, shape=[config.max_time_steps, None], name='source')
self.source_len = tf.placeholder(dtype=tf.int64, shape=[None],
name='source_len')
# This is the ground truth text to be predicted: a version of self.source
# shifted by one.
self.target = tf.placeholder(
dtype=tf.int64, shape=[config.max_time_steps, None], name='target')
def maybe_create_dropout_placeholder(configured_dropout_rate, name):
if configured_dropout_rate > 0.0:
return tf.placeholder(tf.float32, shape=[], name=name)
else:
return None
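# Dropout placeholders are created lazily: a configured rate of 0.0 yields
# None instead of a placeholder, and most downstream dropout applications
# check for None before adding noise, so graphs without dropout avoid both the
# placeholder and the extra feed at run time.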
self.embedding_dropout = maybe_create_dropout_placeholder(
config.embedding_dropout, 'embedding_dropout')
self.token_dropout = maybe_create_dropout_placeholder(
config.token_dropout, 'token_dropout')
self.input_dropout = maybe_create_dropout_placeholder(
config.input_dropout, 'input_dropout')
self.inter_layer_dropout = maybe_create_dropout_placeholder(
config.inter_layer_dropout, 'inter_layer_dropout')
self.update_dropout = maybe_create_dropout_placeholder(
config.update_dropout, 'update_dropout')
self.state_dropout = maybe_create_dropout_placeholder(
config.state_dropout, 'state_dropout')
self.flip_prob = maybe_create_dropout_placeholder(
config.state_dropout_flip_rate, 'flip_prob')
self.output_dropout = maybe_create_dropout_placeholder(
config.output_dropout, 'output_dropout')
self.downprojected_output_dropout = maybe_create_dropout_placeholder(
config.downprojected_output_dropout, 'downprojected_output_dropout')
self.softmax_temperature = tf.placeholder_with_default(
1.0, shape=[], name='softmax_temperature')
## Training
embedding_initializer = tf.variance_scaling_initializer(
scale=config.embedding_init_factor, mode='fan_out',
distribution='truncated_normal')
output_initializer = tf.variance_scaling_initializer(
scale=config.output_init_factor, mode='fan_in',
distribution='truncated_normal')
batch_size = tf.shape(self.source)[1]
last_hidden_size = utils.ensure_list(config.hidden_size)[-1]
tb_h = tf.stack([config.max_time_steps*batch_size, last_hidden_size])
t_b_v = tf.stack([config.max_time_steps, batch_size, config.vocab_size])
t_bk_o = tf.stack([
config.max_time_steps,
batch_size*(config.mos_num_components or 1),
config.output_embedding_size])
tbk_o = tf.stack([
config.max_time_steps*
batch_size*(config.mos_num_components or 1),
config.output_embedding_size])
t_b0_s_v = tf.stack(
[config.max_time_steps, tf.div(batch_size, self.num_samples),
self.num_samples, config.vocab_size])
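# Shape-name convention used below (descriptive comment): each underscore
# separates one tensor axis, and the letters abbreviate the axis sizes --
# t = max_time_steps, b = batch, v = vocab_size, h = last hidden size,
# o = output_embedding_size, k = mos_num_components (or 1), s = num_samples.
# So t_b_v is [time, batch, vocab], tb_h collapses time and batch into a
# single axis of size time*batch, and t_b0_s_v splits the batch into
# batch/num_samples groups of num_samples.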
if config.embed_once:
with tf.variable_scope('im', initializer=embedding_initializer):
embedding = tf.get_variable(
'embedding', [config.vocab_size, config.input_embedding_size],
initializer=embedding_initializer, dtype=tf.float32)
if self.embedding_dropout is not None:
embedding = tf.nn.dropout(
embedding, 1-self.embedding_dropout,
noise_shape=tf.stack([config.vocab_size, 1]))
embedded_source = tf.nn.embedding_lookup(embedding, self.source)
if self.token_dropout is not None:
embedding = tf.nn.dropout(
embedding, 1-self.token_dropout,
noise_shape=tf.stack([config.max_time_steps, batch_size, 1]))
if config.scale_input_embeddings:
embedded_source *= tf.sqrt(tf.cast(config.input_embedding_size,
tf.float32))
sources = embedded_source
else:
assert self.embedding_dropout is None, 'Not implemented.'
assert self.token_dropout is None, 'Not implemented.'
sources = self.source
def lm_1(cell, initial_state, inputs, input_lens, scope=None):
# According to tests (2019-03-13) swap_memory carries only a very small
# penalty, so we use it to choose between dynamic_rnn and static_rnn. For some
# reason, static_rnn can be 2x faster ... sometimes. On the other hand,
# dynamic_rnn handles memory better even without swap_memory=True.
if FLAGS.swap_memory:
return tf.nn.dynamic_rnn(cell=cell, inputs=inputs,
time_major=True,
sequence_length=input_lens,
initial_state=initial_state,
swap_memory=FLAGS.swap_memory,
dtype=tf.float32, scope=scope)
else:
return tf.nn.static_rnn(cell=cell, inputs=tf.unstack(inputs),
sequence_length=input_lens,
initial_state=initial_state,
dtype=tf.float32, scope=scope)
# This is for the config.output_once=True case.
def output_module_1(outputs):
with tf.variable_scope('om', initializer=output_initializer):
# Create the matrix and bias for the final projection into the softmax.
if config.share_input_and_output_embeddings:
assert config.embed_once, 'Not implemented.'
softmax_weights = embedding
softmax_weights_transpose = True
else:
softmax_weights = tf.get_variable(
'weights', [config.output_embedding_size, config.vocab_size],
dtype=tf.float32)
softmax_weights_transpose = False
softmax_bias = tf.get_variable('bias', [1, config.vocab_size],
initializer=tf.zeros_initializer(),
dtype=tf.float32)
def to_softmax(x, dropout=self.downprojected_output_dropout):
if dropout is not None:
if not config.shared_mask_dropout:
x = tf.nn.dropout(x, 1.0-dropout)
else:
x = tf.reshape(x, t_bk_o)
x = tf.nn.dropout(
x, 1.0-dropout,
# same mask for all time steps
noise_shape=[
1, batch_size*(config.mos_num_components or 1),
config.output_embedding_size])
x = tf.reshape(x, tbk_o)
return (
self.softmax_temperature*
(tf.matmul(x, softmax_weights,
transpose_b=softmax_weights_transpose) + softmax_bias))
last_hidden_size = utils.ensure_list(config.hidden_size)[-1]
outputs_t_b_h = tf.convert_to_tensor(outputs)
if self.output_dropout is not None:
if not config.shared_mask_dropout:
outputs_t_b_h = tf.nn.dropout(
outputs_t_b_h, 1.0-self.output_dropout)
else:
outputs_t_b_h = tf.nn.dropout(
outputs_t_b_h, 1.0-self.output_dropout,
noise_shape=[1, batch_size, last_hidden_size])
outputs_tb_h = tf.reshape(outputs_t_b_h, tb_h)
if config.mos_num_components == 0:
if config.output_embedding_size == last_hidden_size:
return (tf.reshape(to_softmax(outputs_tb_h, None), t_b_v),
outputs_t_b_h)
else:
downprojected_outputs_tb_o = utils.linear(
outputs_tb_h, config.output_embedding_size, False,
initializer=utils.orthogonal_initializer(), scope='projection')
logits_tb_v = to_softmax(downprojected_outputs_tb_o)
return tf.reshape(logits_tb_v, t_b_v), outputs_t_b_h
else:
logits_tb_v = utils.mixture_of_softmaxes(
outputs_tb_h, config.mos_num_components,
config.output_embedding_size, to_softmax)
return tf.reshape(logits_tb_v, t_b_v), outputs_t_b_h
# This is for the config.output_once=False case.
def output_module_per_step_1(outputs_b_h):
with tf.variable_scope('om', initializer=output_initializer):
def to_softmax(x, dropout=self.downprojected_output_dropout):
# Create the matrix and bias for the final projection into the
# softmax.
if config.share_input_and_output_embeddings:
assert config.embed_once, 'Not implemented.'
softmax_weights = embedding
softmax_weights_transpose = True
else:
softmax_weights = tf.get_variable(
'weights', [config.output_embedding_size, config.vocab_size],
dtype=tf.float32)
softmax_weights_transpose = False
softmax_bias = tf.get_variable('bias', [1, config.vocab_size],
initializer=tf.zeros_initializer(),
dtype=tf.float32)
if dropout is not None:
x = Dropout(1.0-dropout, share_mask=config.shared_mask_dropout)(x)
return (self.softmax_temperature *
(tf.matmul(x, softmax_weights,
transpose_b=softmax_weights_transpose) +
softmax_bias))
last_hidden_size = utils.ensure_list(config.hidden_size)[-1]
outputs_b_h = Dropout(1.0-self.output_dropout,
share_mask=self.output_dropout)(outputs_b_h)
if config.mos_num_components == 0:
if config.output_embedding_size == last_hidden_size:
return to_softmax(outputs_b_h, None)
else:
downprojected_outputs_b_o = utils.linear(
outputs_b_h, config.output_embedding_size, False,
initializer=utils.orthogonal_initializer(), scope='projection')
logits_b_v = to_softmax(downprojected_outputs_b_o)
return logits_b_v
else:
logits_b_v = utils.mixture_of_softmaxes(
outputs_b_h, config.mos_num_components,
config.output_embedding_size, to_softmax)
return logits_b_v
lm = tf.make_template('lm', lm_1)
def make_cell():
return build_cell(
model=config.model,
num_layers=config.num_layers,
hidden_size=config.hidden_size,
layer_norm=config.layer_norm,
cell_init_factor=config.cell_init_factor,
shared_mask_dropout=config.shared_mask_dropout,
input_dropout=self.input_dropout,
inter_layer_dropout=self.inter_layer_dropout,
state_dropout=self.state_dropout,
update_dropout=self.update_dropout,
state_dropout_flip_rate=self.flip_prob,
tie_forget_and_input_gates=config.tie_forget_and_input_gates,
cap_input_gate=config.cap_input_gate,
forget_bias=config.forget_bias,
feature_mask_rounds=config.feature_mask_rounds,
feature_mask_rank=config.feature_mask_rank,
overlay_rank=config.overlay_rank,
sparsity_ratio=config.sparsity_ratio,
cell_clip=config.cell_clip,
activation_fn=config.activation_fn,
lstm_skip_connection=config.lstm_skip_connection,
residual_connections=config.residual_connections)
def make_conditioning():
if config.embed_once:
with tf.variable_scope('cond_im', initializer=embedding_initializer):
embedding = tf.get_variable(
'embedding', [config.conditioning_vocab_size,
config.input_embedding_size],
initializer=embedding_initializer, dtype=tf.float32)
if self.embedding_dropout is not None:
embedding = tf.nn.dropout(
embedding, 1-self.embedding_dropout,
noise_shape=tf.stack([config.conditioning_vocab_size, 1]))
embedded_source = tf.nn.embedding_lookup(embedding, self.conditioning)
if self.token_dropout is not None:
embedding = tf.nn.dropout(
embedding, 1-self.token_dropout,
noise_shape=tf.stack([config.max_time_steps, batch_size, 1]))
if config.scale_input_embeddings:
embedded_source *= tf.sqrt(tf.cast(config.input_embedding_size,
tf.float32))
conditioning_sources = embedded_source
else:
assert False, 'Not implemented.'
conditioning_cell = make_cell()
conditioning_lm = tf.make_template('cond_lm', lm_1)
initial_state = conditioning_cell.zero_state(batch_size, dtype=tf.float32)
_, conditioning_last_state = conditioning_lm(
conditioning_cell, initial_state,
conditioning_sources, self.conditioning_len)
return conditioning_last_state
cell = make_cell()
if not config.embed_once:
cell = tf.nn.rnn_cell.EmbeddingWrapper(
cell, config.vocab_size, config.input_embedding_size,
initializer=embedding_initializer)
if config.conditioning_separator:
self.initial_state = make_conditioning()
elif config.trainable_initial_state:
with tf.variable_scope('lm_init'):
self.initial_state = utils.trainable_initial_state(
batch_size, cell.state_size)
else:
self.initial_state = cell.zero_state(batch_size, dtype=tf.float32)
outputs, self.last_state = lm(
cell, self.initial_state, sources, self.source_len)
self.cell_outputs = tf.convert_to_tensor(outputs)
if config.output_once:
output_module = tf.make_template('om', output_module_1)
logits_, self.dropped_cell_outputs = output_module(outputs)
else:
assert config.activation_norm_penalty == 0.0, (
'activation_norm_penalty not implemented for output_once=False.')
output_module_per_step = tf.make_template('om', output_module_per_step_1)
# KLUDGE: calling output_module_per_step here gets rid of the
      # 'rnn/FNCell/' prefix on the variable names so output_once=False and
# output_once=True checkpoints are compatible.
output_module_per_step(outputs[0])
output_cell = utils.FNCell(output_module_per_step, config.vocab_size)
logits_, _ = tf.nn.dynamic_rnn(cell=output_cell,
inputs=tf.convert_to_tensor(outputs),
time_major=True,
sequence_length=self.source_len,
swap_memory=FLAGS.swap_memory,
dtype=tf.float32)
def average_samples():
# logits has shape t_b_v, where b=b0*num_samples. Separate out
# the samples in a new dimension.
logits = tf.reshape(logits_, t_b0_s_v)
if config.model_average == 'geometric':
x = tf.reduce_sum(logits, axis=2, keepdims=True)
elif config.model_average == 'arithmetic':
log_probs = tf.nn.log_softmax(logits)
x = tf.reduce_logsumexp(log_probs, axis=2, keepdims=True)
else:
assert False, 'Not implemented.'
# x is t_b0_1_v, tile it to t_b0_s_v.
x = tf.ones_like(logits) * x
return tf.reshape(x, t_b_v)
self.logits = tf.cond(tf.equal(self.num_samples, 1),
lambda: logits_,
average_samples)
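    # Note (illustrative): with num_samples > 1, 'geometric' combines the
    # sampled models by summing their logits (a product-of-experts style
    # combination), while 'arithmetic' averages in probability space via a
    # logsumexp over the per-sample log-probs; the omitted -log(num_samples)
    # constant is absorbed by the subsequent softmax normalisation.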
def _build_loss(self, config):
# Single sample loss (in terms of num_training_samples)
self.xe_losses = utils.seq_softmax_cross_entropy_with_logits(
self.logits, self.target, self.source_len,
config.max_time_steps, reduce_sum=False, name='lm_loss')
self.xe_loss = tf.reduce_sum(self.xe_losses, axis=0)
self.log_probs = tf.nn.log_softmax(self.logits)
if config.l2_penalty == 0.0:
self.l2_loss = 0.0
else:
self.l2_loss = tf.add_n(
[tf.nn.l2_loss(var) for var in tf.trainable_variables()])
if config.l1_penalty == 0.0:
self.l1_loss = 0.0
else:
self.l1_loss = tf.add_n(
[tf.reduce_sum(tf.abs(var)) for var in tf.trainable_variables()])
if config.activation_norm_penalty == 0.0:
self.activation_norm_loss = 0.0
else:
self.activation_norm_loss = tf.reduce_mean(
# Sum over time to make values compatible with AWD-LSTM by Merity.
tf.reduce_sum(tf.square(self.dropped_cell_outputs), axis=0))
self.unregularized_loss = tf.reduce_mean(self.xe_loss)
self.loss = (self.unregularized_loss +
config.l2_penalty * self.l2_loss +
config.l1_penalty * self.l1_loss +
config.activation_norm_penalty * self.activation_norm_loss)
def get_scopes_to_train():
scopes_to_train = ['lm', 'om']
if config.trainable_initial_state:
scopes_to_train = ['lm_init'] + scopes_to_train
if config.embed_once:
scopes_to_train = ['im'] + scopes_to_train
if config.conditioning_separator:
scopes_to_train = ['cond_im', 'cond_lm'] + scopes_to_train
return scopes_to_train
def maybe_clip_grads(grads_and_vars):
logging.info('adding grad norm clipping')
return utils.clip_gradients_in_scope(
grads_and_vars, [''], config.max_grad_norm)
optimizer_builder = utils.get_optimizer(config.optimizer_type)
optimizer = optimizer_builder(self.learning_rate, config)
scopes_to_train = get_scopes_to_train()
grads_and_vars, training_summaries = utils.create_grads(
optimizer, self.loss, scopes_to_train)
# For dyneval.
self.clipped_grads_and_vars = maybe_clip_grads(grads_and_vars)
# Single minibatch training update
self.training_update = optimizer.apply_gradients(
self.clipped_grads_and_vars, global_step=self.global_step_var)
self.training_summary = tf.summary.merge(
training_summaries + utils.summaries_for_trainables())
# Accumulation of gradients across minibatches
if config.accum_batch_size > -1:
trained_vars = [var for _, var in grads_and_vars]
grad_accumulators = [
tf.Variable(tf.zeros_like(trained_var.initialized_value()),
trainable=False)
for trained_var in trained_vars]
self.accumulate_grads = tf.group(*[
accumulator.assign_add(grads_and_vars[0])
for accumulator, grads_and_vars
in zip(grad_accumulators, grads_and_vars)])
accumulated_grads_and_vars = zip(grad_accumulators, trained_vars)
self.accumulated_training_update = optimizer.apply_gradients(
maybe_clip_grads(accumulated_grads_and_vars),
global_step=self.global_step_var)
# Zero the accumulators after the update.
with tf.control_dependencies([self.accumulated_training_update]):
self.accumulated_training_update = tf.group(
*[var.assign(tf.zeros_like(var)) for var in grad_accumulators])
logging.info('Model: adding loss gradients finished.')
def _check_budget(self, config):
num_trainables = utils.log_trainables()
if config.num_params > -1:
assert num_trainables <= config.num_params, (
'The number of trainable parameters ({}) exceeds the budget ({}). '
.format(num_trainables, config.num_params))
if num_trainables < 0.98*(config.num_params-500):
logging.warn('Number of parameters (%s) is way below the budget (%s)',
num_trainables, config.num_params)
def global_step(self, session=None):
if session is None:
session = tf.get_default_session()
return session.run(self.global_step_var)
def add_input_to_feed(self, feed, cond, cond_len, source, source_len, target):
if self.config.conditioning_separator:
feed.update({self.conditioning: cond,
self.conditioning_len: cond_len})
else:
assert cond is None
assert cond_len is None
feed.update({self.source: source,
self.source_len: source_len,
self.target: target})
return feed
def add_dropout_to_feed(self, feed, multiplier=1):
config = self.config
if self.embedding_dropout is not None:
feed.update({self.embedding_dropout: multiplier*config.embedding_dropout})
if self.token_dropout is not None:
feed.update({self.token_dropout: multiplier*config.token_dropout})
if self.input_dropout is not None:
feed.update({self.input_dropout: multiplier*config.input_dropout})
if self.inter_layer_dropout is not None:
feed.update({self.inter_layer_dropout:
multiplier*config.inter_layer_dropout})
if self.update_dropout is not None:
feed.update({self.update_dropout: multiplier*config.update_dropout})
if self.state_dropout is not None:
feed.update({self.state_dropout: multiplier*config.state_dropout})
if self.flip_prob is not None:
feed.update({self.flip_prob: multiplier*config.state_dropout_flip_rate})
if self.output_dropout is not None:
feed.update({self.output_dropout: multiplier*config.output_dropout})
if self.downprojected_output_dropout is not None:
feed.update({self.downprojected_output_dropout:
multiplier*config.downprojected_output_dropout})
return feed
def fit(self, feed, session=None):
"""Training step for observed source language example."""
if session is None:
session = tf.get_default_session()
run_options = tf.RunOptions(
report_tensor_allocations_upon_oom=True)
_, cost, summary, last_state = session.run(
[self.training_update, self.unregularized_loss, self.training_summary,
self.last_state],
feed_dict=feed, options=run_options)
return cost, summary, last_state
def accumulate_gradients(self, feed, session=None):
if session is None:
session = tf.get_default_session()
_, cost, summary, last_state = session.run(
[self.accumulate_grads, self.unregularized_loss,
self.training_summary, self.last_state],
feed_dict=feed)
return cost, summary, last_state
def fit_accumulated(self, feed, session=None):
"""Training step for observed source language example."""
if session is None:
session = tf.get_default_session()
session.run([self.accumulated_training_update], feed_dict=feed)
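  # Illustrative call pattern (not part of the model): when
  # config.accum_batch_size > -1, gradients are meant to be accumulated over
  # several minibatches before a single parameter update, roughly:
  #
  #   for minibatch_feed in minibatch_feeds:   # hypothetical iterable of feeds
  #       model.accumulate_gradients(minibatch_feed)
  #   model.fit_accumulated(feed)              # applies update, zeroes accumulators
  #
  # `model` and `minibatch_feeds` are assumed names for illustration only.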
|
|
"""Unit test for treadmill.cli.allocation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import click
import click.testing
import mock
import treadmill
from treadmill import plugin_manager
class AllocationTest(unittest.TestCase):
"""Mock test for treadmill.cli.allocation"""
def setUp(self):
"""Setup common test variables"""
self.runner = click.testing.CliRunner()
self.alloc_cli = plugin_manager.load('treadmill.cli',
'allocation').init()
@mock.patch('treadmill.restclient.get')
@mock.patch('treadmill.restclient.delete',
mock.Mock(return_value=mock.MagicMock()))
@mock.patch('treadmill.context.Context.admin_api',
mock.Mock(return_value=['http://xxx:1234']))
def test_allocation_delete(self, get_mock):
"""Test cli.allocation: delete"""
# delete a tenant
# subtest case 1: no subtenant and reservation
        # set up two return objects for the two restclient.get invocations
return_mock1 = mock.Mock()
return_mock2 = mock.Mock()
        # this one is for getting all tenants
return_mock1.json.return_value = [{
'_id': None,
'tenant': 'tent',
'systems': [1, 2, 3]}]
        # this one is for getting all reservations under 'tent'
return_mock2.json.return_value = []
get_mock.side_effect = [return_mock1, return_mock2]
result = self.runner.invoke(self.alloc_cli,
['delete', 'tent'])
self.assertEqual(result.exit_code, 0)
treadmill.restclient.delete.assert_called_with(
['http://xxx:1234'],
'/tenant/tent'
)
calls = [mock.call(['http://xxx:1234'], '/tenant/'),
mock.call(['http://xxx:1234'], '/allocation/tent')]
get_mock.assert_has_calls(calls)
self.assertEqual(treadmill.restclient.get.call_count, 2)
# subtest case 2: has subtenant
get_mock.reset_mock()
get_mock.return_value = mock.DEFAULT
get_mock.side_effect = None
return_mock1.json.return_value = [
{'_id': None,
'tenant': 'tent',
'systems': [1, 2, 3]},
{'_id': None,
'tenant': 'tent:subtent',
'systems': [1, 2, 3]}]
get_mock.return_value = return_mock1
result = self.runner.invoke(self.alloc_cli,
['delete', 'tent'])
self.assertEqual(result.exit_code, 0)
get_mock.assert_called_once_with(['http://xxx:1234'], '/tenant/')
# subtest case 3: tenant does not exist
get_mock.reset_mock()
get_mock.return_value = mock.DEFAULT
from treadmill.restclient import NotFoundError
get_mock.side_effect = [return_mock2, NotFoundError]
result = self.runner.invoke(self.alloc_cli,
['delete', 'tent'])
self.assertEqual(result.exit_code, 1)
calls = [mock.call(['http://xxx:1234'], '/tenant/'),
mock.call(['http://xxx:1234'], '/allocation/tent')]
get_mock.assert_has_calls(calls)
self.assertEqual(treadmill.restclient.get.call_count, 2)
# subtest case 4: has reservation
get_mock.reset_mock()
get_mock.return_value = mock.DEFAULT
return_mock1.json.return_value = [
{'_id': None,
'tenant': 'tent',
'systems': [1, 2, 3]}]
return_mock2.json.return_value = [{'_id': 'tent/dev'}]
get_mock.side_effect = [return_mock1, return_mock2]
result = self.runner.invoke(self.alloc_cli,
['delete', 'tent'])
self.assertEqual(result.exit_code, 0)
calls = [mock.call(['http://xxx:1234'], '/tenant/'),
mock.call().json(),
mock.call(['http://xxx:1234'], '/allocation/tent')]
get_mock.assert_has_calls(calls)
self.assertEqual(treadmill.restclient.get.call_count, 2)
# delete all reservations
result = self.runner.invoke(self.alloc_cli,
['delete', 'tent/dev'])
self.assertEqual(result.exit_code, 0)
treadmill.restclient.delete.assert_called_with(
['http://xxx:1234'],
'/allocation/tent/dev'
)
# delete a reservation
result = self.runner.invoke(self.alloc_cli,
['delete', 'tent/dev/rr'])
self.assertEqual(result.exit_code, 0)
treadmill.restclient.delete.assert_called_with(
['http://xxx:1234'],
'/allocation/tent/dev/reservation/rr'
)
@mock.patch('treadmill.restclient.put')
@mock.patch('treadmill.restclient.get')
@mock.patch('treadmill.context.Context.admin_api',
mock.Mock(return_value=['http://xxx:1234']))
def test_allocation_configure(self, get_mock, put_mock):
"""Test cli.allocation: configure"""
get_mock.return_value.json.return_value = {'systems': [1, 2]}
self.runner.invoke(
self.alloc_cli, ['configure', 'tent/dev', '--systems', '3']
)
put_mock.assert_called_with(
[u'http://xxx:1234'],
u'/tenant/tent/dev',
payload={
u'systems': [1, 2, 3]
}
)
put_mock.reset_mock()
self.runner.invoke(
self.alloc_cli,
['configure', 'tent/dev', '--systems', '3', '--set']
)
put_mock.assert_called_with(
[u'http://xxx:1234'],
u'/tenant/tent/dev',
payload={
u'systems': [3]
}
)
@mock.patch('treadmill.restclient.put')
@mock.patch('treadmill.restclient.post')
@mock.patch('treadmill.restclient.get')
@mock.patch('treadmill.cli.allocation._display_tenant', mock.Mock())
@mock.patch('treadmill.context.Context.admin_api',
mock.Mock(return_value=['http://xxx:1234']))
def test_allocation_reserve(self, get_mock, post_mock, put_mock):
"""Test cli.allocation: reserve"""
return_mock1 = mock.Mock()
return_mock2 = mock.Mock()
return_mock1.json.return_value = [{
'_id': None,
'tenant': 'tent',
'systems': [1, 2, 3]}]
return_mock2.json.return_value = {"cpu": "0%",
"disk": "0M",
"rank_adjustment": 10,
"partition": "_default",
"memory": "0M",
"assignments": [],
"rank": 100,
"max_utilization": None,
"_id": "tent/qa/test-v3",
"cell": "test-v3",
"traits": []}
get_mock.side_effect = [return_mock1,
treadmill.restclient.NotFoundError,
return_mock1, return_mock2,
return_mock1, return_mock2]
result = self.runner.invoke(
self.alloc_cli, ['reserve', 'tent', '--env', 'qa',
'--cell', 'test-v3', '--empty'])
self.assertEqual(result.exit_code, 0)
result = self.runner.invoke(
self.alloc_cli, ['reserve', 'tent', '--env', 'qa',
'--cell', 'test-v3', '--memory', '125M',
'--partition', 'aq7'])
self.assertEqual(result.exit_code, 0)
result = self.runner.invoke(
self.alloc_cli, ['reserve', 'tent', '--env', 'qa',
'--cell', 'test-v3',
'--max-utilization', '10'])
self.assertEqual(result.exit_code, 0)
result = self.runner.invoke(
self.alloc_cli, ['reserve', 'tent', '--env', 'qa',
'--cell', 'test-v3', '--traits', 'X,Y'])
self.assertEqual(result.exit_code, 1)
call1 = mock.call(['http://xxx:1234'], '/tenant/tent')
call2 = mock.call(['http://xxx:1234'],
'/allocation/tent/qa/reservation/test-v3')
calls = [call1, call2, call1, call2, call1, call2, call1]
self.assertEqual(get_mock.call_count, 7)
get_mock.assert_has_calls(calls, any_order=False)
call1 = mock.call(['http://xxx:1234'], '/allocation/tent/qa',
payload={'environment': 'qa'})
call2 = mock.call(['http://xxx:1234'],
'/allocation/tent/qa/reservation/test-v3',
payload={'memory': '0M', 'cpu': '0%', 'disk': '0M'})
calls = [call1, call2, call1, call1]
post_mock.assert_has_calls(calls, any_order=False)
self.assertEqual(post_mock.call_count, 4)
call1 = mock.call(['http://xxx:1234'],
'/allocation/tent/qa/reservation/' +
'test-v3',
payload={'memory': '125M',
'partition': 'aq7',
'cpu': '0%',
'disk': '0M'})
call2 = mock.call(['http://xxx:1234'],
'/allocation/tent/qa/reservation/' +
'test-v3',
payload={'memory': '0M',
'partition': '_default',
'cpu': '0%',
'disk': '0M',
'max_utilization': 10})
calls = [call1, call2]
self.assertEqual(put_mock.call_count, 2)
put_mock.assert_has_calls(calls, any_order=False)
if __name__ == '__main__':
unittest.main()
|
|
"""Support for FFmpeg."""
from __future__ import annotations
import asyncio
import re
from haffmpeg.tools import IMAGE_JPEG, FFVersion, ImageFrame
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONTENT_TYPE_MULTIPART,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, ServiceCall, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
DOMAIN = "ffmpeg"
SERVICE_START = "start"
SERVICE_STOP = "stop"
SERVICE_RESTART = "restart"
SIGNAL_FFMPEG_START = "ffmpeg.start"
SIGNAL_FFMPEG_STOP = "ffmpeg.stop"
SIGNAL_FFMPEG_RESTART = "ffmpeg.restart"
DATA_FFMPEG = "ffmpeg"
CONF_INITIAL_STATE = "initial_state"
CONF_INPUT = "input"
CONF_FFMPEG_BIN = "ffmpeg_bin"
CONF_EXTRA_ARGUMENTS = "extra_arguments"
CONF_OUTPUT = "output"
DEFAULT_BINARY = "ffmpeg"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Optional(CONF_FFMPEG_BIN, default=DEFAULT_BINARY): cv.string}
)
},
extra=vol.ALLOW_EXTRA,
)
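# A minimal configuration.yaml entry for this component might look like the
# following (the path is illustrative); ffmpeg_bin is the only option in this
# schema:
#
#   ffmpeg:
#     ffmpeg_bin: /usr/bin/ffmpeg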
SERVICE_FFMPEG_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the FFmpeg component."""
conf = config.get(DOMAIN, {})
manager = FFmpegManager(hass, conf.get(CONF_FFMPEG_BIN, DEFAULT_BINARY))
await manager.async_get_version()
# Register service
async def async_service_handle(service: ServiceCall) -> None:
"""Handle service ffmpeg process."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if service.service == SERVICE_START:
async_dispatcher_send(hass, SIGNAL_FFMPEG_START, entity_ids)
elif service.service == SERVICE_STOP:
async_dispatcher_send(hass, SIGNAL_FFMPEG_STOP, entity_ids)
else:
async_dispatcher_send(hass, SIGNAL_FFMPEG_RESTART, entity_ids)
hass.services.async_register(
DOMAIN, SERVICE_START, async_service_handle, schema=SERVICE_FFMPEG_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_STOP, async_service_handle, schema=SERVICE_FFMPEG_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_RESTART, async_service_handle, schema=SERVICE_FFMPEG_SCHEMA
)
hass.data[DATA_FFMPEG] = manager
return True
@bind_hass
def get_ffmpeg_manager(hass: HomeAssistant) -> FFmpegManager:
"""Return the FFmpegManager."""
if DATA_FFMPEG not in hass.data:
raise ValueError("ffmpeg component not initialized")
return hass.data[DATA_FFMPEG]
@bind_hass
async def async_get_image(
hass: HomeAssistant,
input_source: str,
output_format: str = IMAGE_JPEG,
extra_cmd: str | None = None,
width: int | None = None,
height: int | None = None,
) -> bytes | None:
"""Get an image from a frame of an RTSP stream."""
manager = hass.data[DATA_FFMPEG]
ffmpeg = ImageFrame(manager.binary)
if width and height and (extra_cmd is None or "-s" not in extra_cmd):
size_cmd = f"-s {width}x{height}"
if extra_cmd is None:
extra_cmd = size_cmd
else:
extra_cmd += " " + size_cmd
image = await asyncio.shield(
ffmpeg.get_image(input_source, output_format=output_format, extra_cmd=extra_cmd)
)
return image
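# Illustrative usage sketch (assumes a Home Assistant instance `hass` and a
# reachable stream; the URL is hypothetical):
#
#   image = await async_get_image(
#       hass, "rtsp://camera.local/stream", width=640, height=480)
#
# When width/height are given and extra_cmd carries no "-s" flag, the helper
# appends "-s 640x480" to the ffmpeg arguments before grabbing the frame.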
class FFmpegManager:
"""Helper for ha-ffmpeg."""
def __init__(self, hass, ffmpeg_bin):
"""Initialize helper."""
self.hass = hass
self._cache = {}
self._bin = ffmpeg_bin
self._version = None
self._major_version = None
@property
def binary(self):
"""Return ffmpeg binary from config."""
return self._bin
async def async_get_version(self):
"""Return ffmpeg version."""
ffversion = FFVersion(self._bin)
self._version = await ffversion.get_version()
self._major_version = None
if self._version is not None:
result = re.search(r"(\d+)\.", self._version)
if result is not None:
self._major_version = int(result.group(1))
return self._version, self._major_version
@property
def ffmpeg_stream_content_type(self):
"""Return HTTP content type for ffmpeg stream."""
if self._major_version is not None and self._major_version > 3:
return CONTENT_TYPE_MULTIPART.format("ffmpeg")
return CONTENT_TYPE_MULTIPART.format("ffserver")
class FFmpegBase(Entity):
"""Interface object for FFmpeg."""
def __init__(self, initial_state=True):
"""Initialize ffmpeg base object."""
self.ffmpeg = None
self.initial_state = initial_state
async def async_added_to_hass(self):
"""Register dispatcher & events.
This method is a coroutine.
"""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_FFMPEG_START, self._async_start_ffmpeg
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_FFMPEG_STOP, self._async_stop_ffmpeg
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_FFMPEG_RESTART, self._async_restart_ffmpeg
)
)
# register start/stop
self._async_register_events()
@property
def available(self):
"""Return True if entity is available."""
return self.ffmpeg.is_running
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
async def _async_start_ffmpeg(self, entity_ids):
"""Start a FFmpeg process.
This method is a coroutine.
"""
raise NotImplementedError()
async def _async_stop_ffmpeg(self, entity_ids):
"""Stop a FFmpeg process.
This method is a coroutine.
"""
if entity_ids is None or self.entity_id in entity_ids:
await self.ffmpeg.close()
async def _async_restart_ffmpeg(self, entity_ids):
"""Stop a FFmpeg process.
This method is a coroutine.
"""
if entity_ids is None or self.entity_id in entity_ids:
await self._async_stop_ffmpeg(None)
await self._async_start_ffmpeg(None)
@callback
def _async_register_events(self):
"""Register a FFmpeg process/device."""
async def async_shutdown_handle(event):
"""Stop FFmpeg process."""
await self._async_stop_ffmpeg(None)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_shutdown_handle)
# start on startup
if not self.initial_state:
return
async def async_start_handle(event):
"""Start FFmpeg process."""
await self._async_start_ffmpeg(None)
self.async_write_ha_state()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, async_start_handle)
|
|
"""
parentfit.py
ParentFit is the parent class for various Fit implementations.
"""
import numpy as np
import os
import sys
import logging
from distutils.dir_util import mkpath
import random
import uuid
#~ from emcee.utils import MPIPool
from multiprocessing import Pool
from multiprocessing import cpu_count
sys.path.insert(0, os.path.abspath('..'))
from . import expectmax
from . import readparam
from . import tabletool
from . import component
from . import traceorbit
# python3 throws FileNotFoundError that is essentially the same as IOError
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def dummy_trace_orbit_func(loc, times=None):
"""
Purely for testing purposes
Dummy trace orbit func to skip irrelevant computation
    Applies a small constraint on age (since otherwise it's a free-floating
    parameter)
"""
if times is not None:
if np.all(times > 1.):
return loc + 1000.
return loc
def log_message(msg, symbol='.', surround=False):
"""Little formatting helper"""
res = '{}{:^40}{}'.format(5 * symbol, msg, 5 * symbol)
if surround:
res = '\n{}\n{}\n{}'.format(50 * symbol, res, 50 * symbol)
logging.info(res)
class ParentFit(object):
"""
    Most arguments are taken straight from the fit_pars dictionary,
    so they are not listed explicitly here.
Description of parameters can be found in README.md along with their
default values and whether they are required.
"""
OPTIMISATION_METHODS = ['emcee', 'Nelder-Mead']
    # Internal filestems that Chronostar uses to store results throughout a fit.
    # Should not be changed, otherwise Chronostar may struggle to retrieve progress
    # from previous fits.
final_comps_file = 'final_comps.npy'
final_med_and_spans_file = 'final_med_and_spans.npy'
final_memb_probs_file = 'final_membership.npy'
# For detailed description of parameters, see the main README.md file
# in parent directory.
DEFAULT_FIT_PARS = {
'results_dir':'',
# Output from dataprep, XYZUVW data, plus background overlaps
        # Can be a filename to an astropy table, or an actual table
'data_table':None,
# Whether to look for dX, .. c_XY or X_error, .. corr_X_Y in
# the column names
'historical_colnames':False,
# Column name for stellar IDs. This is used at the end when generating
# final fits table with IDs and membership probabilities.
# This is optional.
'stellar_id_colname': None,
# File name that points to a stored list of components, typically from
# a previous fit. Some example filenames could be:
# - 'some/prev/fit/final_comps.npy
# - 'some/prev/fit/2/A/final_comps.npy
# Alternatively, if you already have the list of components, just
# provide them to `init_comps`. Don't do both.
# 'init_comps_file':None, # TODO: Is this redundant with 'init_comps'
'init_comps':None,
        # One of these two is required if initialising a run with ncomps != 1
# One can also initialise a Chronostar run with memberships.
# Array is [nstars, ncomps] float array
# Each row should sum to 1.
# Same as in 'final_membership.npy'
# TODO: implement this in a way that info can be passed in from text file
# e.g. a path to a file name
# for now, can only be used from within a script, i.e. given a numpy
# array object
'init_memb_probs':None,
# Provide a string name that corresponds to a ComponentClass
        # An actual Component class will be inserted into the parameter
        # dictionary to be passed into expectmax
'component':'sphere',
'max_comp_count':20,
'max_em_iterations':200,
# Convergence criteria for when a fit_many_comps run has converged
'bic_conv_tol':0.1, # TODO: NOT TESTED!
'nthreads':1, # TODO: NOT IMPLEMENTED
'use_background':True,
'use_box_background':False,
'overwrite_prev_run':False,
'burnin':500,
'sampling_steps':1000,
'store_burnin_chains':False,
'ignore_stable_comps':True,
# If loading parameters from text file, can provide strings:
# - 'epicyclic' for epicyclic
        # - 'dummy_trace_orbit_func' for a trace orbit function that doesn't do anything (for testing)
        # Alternatively, if building up the parameter dictionary in a script, can
# provide actual function.
'trace_orbit_func':traceorbit.trace_cartesian_orbit,
# MZ
# Specify what optimisation method in the maximisation step of
# the EM algorithm to use. Default: emcee. Also available:
# In principle any method from scipy.optimise.minimise, but
# here we recommend Nelder-Mead (because the initialisation
# with any additional arguments, e.g. Jacobian etc. is not
# implemented in Chronostar).
# 'emcee' | 'Nelder-Mead'
'optimisation_method': 'emcee',
# Optimise components in parallel in expectmax.maximise.
'nprocess_ncomp': False,
# Overwrite final results in a fits file
'overwrite_fits': False,
# How to split group: in age or in space?
'split_group': 'age',
'par_log_file':'fit_pars.log',
}
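    # Illustrative sketch of a minimal fit_pars dictionary (paths are
    # hypothetical); any key left out falls back to DEFAULT_FIT_PARS:
    #
    #   fit_pars = {
    #       'results_dir': 'results/my_fit/',
    #       'data_table': 'data/my_stars_xyzuvw.fits',
    #       'component': 'sphere',
    #       'optimisation_method': 'Nelder-Mead',
    #   }
    #   fit = NaiveFit(fit_pars)  # NaiveFit is a concrete subclass of ParentFit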
def __init__(self, fit_pars):
"""
Parameters
----------
fit_pars : str -or- dictionary
If a string, `fit_pars` should be a path to a parameter file which
can be parsed by readparam.readParam, to construct a dictionary.
Alternatively, an actual dictionary can be passed in. See README.md
for a description of parameters.
"""
# Parse parameter file if required
if type(fit_pars) is str:
fit_pars = readparam.readParam(fit_pars, default_pars=self.DEFAULT_FIT_PARS)
# Make a new dictionary, with priority given to contents of fit_pars
self.fit_pars = dict(self.DEFAULT_FIT_PARS)
self.fit_pars.update(fit_pars)
assert type(self.fit_pars) is dict
try:
assert np.isin(self.fit_pars['optimisation_method'], self.OPTIMISATION_METHODS)
except AssertionError:
raise UserWarning('%s is not in %s\nMake sure no quotation marks in par file'%(
self.fit_pars['optimisation_method'], self.OPTIMISATION_METHODS
))
# MZ: Make sure 'par_log_file' is written into the results folder
self.fit_pars['par_log_file'] = os.path.join(self.fit_pars['results_dir'], self.fit_pars['par_log_file'])
# Data prep should already have been completed, so we simply build
# the dictionary of arrays from the astropy table
self.data_dict = tabletool.build_data_dict_from_table(self.fit_pars['data_table'],
historical=self.fit_pars['historical_colnames'])
# The NaiveFit approach is to assume starting with 1 component
self.ncomps = 1
# Import suitable component class
if self.fit_pars['component'] == 'sphere':
self.Component = component.SphereComponent
self.fit_pars['Component'] = component.SphereComponent
elif self.fit_pars['component'] == 'ellip':
self.Component = component.EllipComponent
self.fit_pars['Component'] = component.EllipComponent
else:
raise UserWarning('Unknown (or missing) component parametrisation')
# Check results directory is valid
# If path exists, make a new results_directory with a random int
if os.path.exists(self.fit_pars['results_dir']) and \
not self.fit_pars['overwrite_prev_run']:
rdir = '{}_{}'.format(self.fit_pars['results_dir'].rstrip('/'),
random.randint(0, 1000))
else:
rdir = self.fit_pars['results_dir']
self.rdir = rdir.rstrip('/') + '/'
mkpath(self.rdir)
assert os.access(self.rdir, os.W_OK)
# Log fit parameters,
readparam.log_used_pars(self.fit_pars, default_pars=self.DEFAULT_FIT_PARS)
# Now that results directory is set up, can set up log file
logging.basicConfig(filename=self.rdir + 'log.log', level=logging.INFO)
        # Log how many iterations (and other limits) the code can run for
log_message(msg='Component count cap set to {}'.format(
self.fit_pars['max_comp_count']),
symbol='+', surround=True)
log_message(msg='Iteration count cap set to {}'.format(
self.fit_pars['max_em_iterations']),
symbol='+', surround=True)
print('printed')
# Check nthreads does not exceed hardware
if self.fit_pars['nthreads'] > cpu_count() - 1:
raise UserWarning('Provided nthreads exceeds cpu count on this machine. '
                              'Remember to leave one cpu free for the master thread!')
        # MZ: If nthreads>1: create a multiprocessing Pool (MPIPool is disabled)
if self.fit_pars['nthreads']>1:
#self.pool = MPIPool()
log_message('pool = Pool(nthreads) = pool(%d)'%self.fit_pars['nthreads'])
self.fit_pars['pool']=Pool(self.fit_pars['nthreads'])
else:
self.pool = None
# ------------------------------------------------------------
# ----- SETTING UP RUN CUSTOMISATIONS ----------------------
# ------------------------------------------------------------
# Set up trace_orbit_func
if self.fit_pars['trace_orbit_func'] == 'dummy_trace_orbit_func':
self.fit_pars['trace_orbit_func'] = dummy_trace_orbit_func
elif self.fit_pars['trace_orbit_func'] == 'epicyclic':
log_message('trace_orbit: epicyclic')
self.fit_pars['trace_orbit_func'] = traceorbit.trace_epicyclic_orbit
else:
self.fit_pars['trace_orbit_func'] = traceorbit.trace_cartesian_orbit
if type(self.fit_pars['init_comps']) is str:
self.fit_pars['init_comps'] = self.Component.load_raw_components(
self.fit_pars['init_comps'])
self.ncomps = len(self.fit_pars['init_comps'])
print('Managed to load in init_comps from file')
else:
self.fit_pars['init_comps'] = None
print("'Init comps' is initialised as none")
print('test')
# TODO: If initialising with membership probabilities, adjust self.ncomps
def build_comps_from_chains(self, run_dir):
"""
        Build component objects from stored emcee chains and corresponding
        lnprobs.
Parameters
----------
run_dir: str
Directory of an EM fit, which in the context of NaiveFit will be
e.g. 'myfit/1', or 'myfit/2/A'
Returns
-------
comps: [Component]
A list of components that correspond to the best fit from the
run in question.
"""
logging.info('Component class has been modified, reconstructing '
'from chain')
comps = self.ncomps * [None]
for i in range(self.ncomps):
final_cdir = run_dir + 'final/comp{}/'.format(i)
chain = np.load(final_cdir + 'final_chain.npy')
lnprob = np.load(final_cdir + 'final_lnprob.npy')
npars = len(self.Component.PARAMETER_FORMAT)
best_ix = np.argmax(lnprob)
best_pars = chain.reshape(-1, npars)[best_ix]
comps[i] = self.Component(emcee_pars=best_pars)
self.Component.store_raw_components(
str(run_dir + 'final/' + self.final_comps_file),
comps)
return comps
def log_score_comparison(self, prev, new):
"""
Purely a logging helper function.
Log BIC comparisons.
Parameters
----------
prev: dict
            A dictionary of scores from the previous run with the following entries
- bic: the Bayesian Information Criterion
- lnlike : the log likelihood
- lnpost : the log posterior
new: dict
            A dictionary of scores from the new run, with entries identical to
`prev`
Result
------
None
"""
if new['bic'] < prev['bic']:
logging.info("Extra component has improved BIC...")
logging.info(
"New BIC: {} < Old BIC: {}".format(new['bic'], prev['bic']))
else:
logging.info("Extra component has worsened BIC...")
logging.info(
"New BIC: {} > Old BIC: {}".format(new['bic'], prev['bic']))
logging.info("lnlike: {} | {}".format(new['lnlike'], prev['lnlike']))
logging.info("lnpost: {} | {}".format(new['lnpost'], prev['lnpost']))
def build_init_comps(self, prev_comps, split_comp_ix, prev_med_and_spans,
memb_probs):
"""
Given a list of converged components from a N component fit, generate
a list of N+1 components with which to initialise an EM run.
        This is done by taking the target component, `prev_comps[split_comp_ix]`,
        and replacing it in the list of comps with two components obtained by
        splitting it into a lower-age and a higher-age component.
Parameters
----------
prev_comps : [N] list of Component objects
List of components from the N component fit
split_comp_ix : int
The index of component which is to be split into two
prev_med_and_spans : [ncomps,npars,3] np.array
            The median and spans of the parameters from the previous fit
Return
------
init_comps: [N+1] list of Component objects
Side effects
------------
Updates self.fit_pars['init_comps'] with a [N+1] list of Component
objects
Edit history
------------
2020-11-14 TC: replaced explicit check for emcee vs Nelder-mead when
trying to use prev_med_and_spans. This enables emcee runs to continue
on from Nelder-mead runs, and hopefully generalises this section to
be agnostic of optimisation method
"""
target_comp = prev_comps[split_comp_ix]
assert isinstance(target_comp, self.Component)
# Decompose and replace the ith component with two new components
# by using the 16th and 84th percentile ages from previous run
if self.fit_pars['split_group']=='age':
try:
lo_age = prev_med_and_spans[split_comp_ix, -1, 1]
hi_age = prev_med_and_spans[split_comp_ix, -1, 2]
except TypeError:
age = target_comp.get_age()
lo_age = 0.8*age
hi_age = 1.2*age
            except IndexError:  # Added by MZ due to "IndexError: too many indices for array" (when using Nelder-Mead, 'final_med_and_spans.npy' is an empty file)
# Maybe previous iteration was done with Nelder-Mead
age = target_comp.get_age()
lo_age = 0.8*age
hi_age = 1.2*age
split_comps = target_comp.split_group_age(lo_age=lo_age, hi_age=hi_age)
elif self.fit_pars['split_group']=='spatial':
split_comps = target_comp.split_group_spatial(self.data_dict,
memb_probs[:,split_comp_ix])
init_comps = list(prev_comps)
init_comps.pop(split_comp_ix)
init_comps.insert(split_comp_ix, split_comps[1])
init_comps.insert(split_comp_ix, split_comps[0])
return init_comps
def run_em_unless_loadable(self, run_dir):
"""
        Run an EM fit, but only if it is not loadable from a previous run
"""
try:
# This fails when gradient descent is used and med_and_spans are not meaningful.
try:
med_and_spans = np.load(os.path.join(run_dir, 'final/', self.final_med_and_spans_file))
print('run_em_unless_loadable.try successful')
except ValueError:
logging.info('med_and_spans not read. Presumably you are using gradient descent optimisation procedure?')
print('run_em_unless_loadable.except ValueError')
med_and_spans = [None]
memb_probs = np.load(os.path.join(
run_dir, 'final/', self.final_memb_probs_file))
comps = self.Component.load_raw_components(
str(os.path.join(run_dir, 'final/', self.final_comps_file)))
logging.info('Loaded from previous run')
# Handle case where Component class has been modified and can't
# load the raw components
except AttributeError:
print('run_em_unless_loadable.except AttributeError')
# TODO: check that the final chains looked for are guaranteed to be saved
comps = self.build_comps_from_chains(run_dir)
# Handle the case where files are missing, which means we must
# perform the fit.
#~ except (IOError, FileNotFoundError) as e:
except IOError:
print('run_em_unless_loadable.except IOError')
print('run_em_unless_loadable: fitting comps', self.fit_pars['init_memb_probs'])
comps, med_and_spans, memb_probs = \
expectmax.fit_many_comps(data=self.data_dict,
ncomps=self.ncomps, rdir=run_dir,
**self.fit_pars)
        # Since init_comps and init_memb_probs are only meant for one-time use,
        # we clear them to avoid any future usage.
self.fit_pars['init_comps'] = None
self.fit_pars['init_memb_probs'] = None
return {'comps':comps, 'med_and_spans':med_and_spans, 'memb_probs':memb_probs}
def iter_end_log(self, best_split_ix, prev_result, new_result):
logging.info("Selected {} as best decomposition".format(
chr(ord('A') + best_split_ix)))
logging.info(
"Turned\n{}".format(prev_result['comps'][best_split_ix].get_pars()))
logging.info('with {} members'.format(
prev_result['memb_probs'].sum(axis=0)[best_split_ix]))
logging.info("into\n{}\n&\n{}".format(
new_result['comps'][best_split_ix].get_pars(),
new_result['comps'][best_split_ix + 1].get_pars(),
))
logging.info('with {} and {} members'.format(
new_result['memb_probs'].sum(axis=0)[best_split_ix],
new_result['memb_probs'].sum(axis=0)[best_split_ix + 1],
))
logging.info("for an overall membership breakdown\n{}".format(
new_result['memb_probs'].sum(axis=0)
))
def log_final_log(self, prev_result, prev_score):
logging.info('Final best fits:')
[logging.info(c.get_pars()) for c in prev_result['comps']]
logging.info('Final age med and span:')
if self.fit_pars['optimisation_method']=='emcee':
[logging.info(row[-1]) for row in prev_result['med_and_spans']]
logging.info('Membership distribution: {}'.format(
prev_result['memb_probs'].sum(axis=0)))
logging.info('Final membership:')
logging.info('\n{}'.format(np.round(prev_result['memb_probs'] * 100)))
logging.info('Final lnlikelihood: {}'.format(prev_score['lnlike']))
logging.info('Final lnposterior: {}'.format(prev_score['lnpost']))
logging.info('Final BIC: {}'.format(prev_score['bic']))
logging.info('#########################')
logging.info('### END #################')
logging.info('#########################')
def calc_score(self, comps, memb_probs, use_box_background=False):
"""
Calculate global score of fit for comparison with future fits with different
component counts
Parameters
----------
        :param comps: list of fitted Component objects
        :param memb_probs: [nstars, ncomps] array of membership probabilities
        :return: dict with 'bic', 'lnlike' and 'lnpost' entries
TODO: Establish relevance of bg_ln_ols
"""
print('calc_score memb_probs', memb_probs)
lnlike = expectmax.get_overall_lnlikelihood(self.data_dict,
comps,
old_memb_probs=memb_probs,
use_box_background=use_box_background,
# bg_ln_ols=bg_ln_ols,
)
lnpost = expectmax.get_overall_lnlikelihood(self.data_dict,
comps,
# bg_ln_ols=bg_ln_ols,
old_memb_probs=memb_probs,
use_box_background=use_box_background,
inc_posterior=True)
bic = expectmax.calc_bic(self.data_dict, self.ncomps, lnlike,
memb_probs=memb_probs,
Component=self.Component)
# 2020/11/16 TC: handling the case for a bad bic.
# This comes up for the initial 1 component fit with box background
# because I haven't thought of a general way to initialise memberships
# that doesn't yield 0 background members.
if np.isnan(bic):
logging.info('Warning, bic was NaN')
bic = np.inf
return {'bic':bic, 'lnlike':lnlike, 'lnpost':lnpost}
def write_results_to_file(self, prev_result, prev_score):
"""
Various means of storing result to file
Edit history
-------------
2020-11-12 Tim Crundall
code originally by Marusa, Tim just moved it to avoid cluttering main
execution method
TODO: write fits file with id and memberships
TODO: ascii file with components today
"""
# WRITING THE FINAL RESULTS INTO FILES
logging.info("... saving previous fit as best fit to data")
self.Component.store_raw_components(self.rdir + self.final_comps_file,
prev_result['comps'])
self.Component.store_components_ascii(self.rdir + 'final_comps_ascii.txt',
prev_result['comps'], overwrite=self.fit_pars['overwrite_prev_run'])
np.save(self.rdir + self.final_med_and_spans_file, prev_result['med_and_spans'])
np.save(self.rdir + self.final_memb_probs_file, prev_result['memb_probs'])
np.save(self.rdir + 'final_likelihood_post_and_bic',
prev_score)
# Save components in fits file
tabcomps = self.Component.convert_components_array_into_astropy_table(prev_result['comps'])
if self.fit_pars['overwrite_fits']:
tabcomps.write(os.path.join(self.rdir, 'final_comps_%d.fits'%len(prev_result['comps'])), overwrite=self.fit_pars['overwrite_fits'])
else:
filename_comps_fits_random = os.path.join(self.rdir, 'final_comps_%d_%s.fits'%(len(prev_result['comps']), str(uuid.uuid4().hex)))
tabcomps.write(filename_comps_fits_random, overwrite=self.fit_pars['overwrite_fits'])
# Save membership fits file
try:
if self.fit_pars['overwrite_fits']:
tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(self.fit_pars['data_table'], prev_result['memb_probs'], prev_result['comps'], os.path.join(self.rdir, 'final_memberships_%d.fits'%len(prev_result['comps'])), get_background_overlaps=True, stellar_id_colname = self.fit_pars['stellar_id_colname'], overwrite_fits = self.fit_pars['overwrite_fits'])
else:
filename_memb_probs_fits_random = os.path.join(self.rdir, 'final_memberships_%d_%s.fits'%(len(prev_result['comps']), str(uuid.uuid4().hex)))
tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(self.fit_pars['data_table'], prev_result['memb_probs'], prev_result['comps'], filename_memb_probs_fits_random, get_background_overlaps=True, stellar_id_colname = self.fit_pars['stellar_id_colname'], overwrite_fits = self.fit_pars['overwrite_fits'])
        except Exception:
logging.info("[WARNING] Couldn't print membership.fits file. Check column id.")
self.log_final_log(prev_result, prev_score)
|
|
#!/usr/bin/python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is a script that generates the content and HTML files for Media Source
codec config change LayoutTests.
"""
import json
import os
DURATION = 2
MEDIA_FORMATS = ['webm', 'mp4']
ENCODE_SETTINGS = [
## Video-only files
# Frame rate changes
{'fs': '320x240', 'fr': 24, 'kfr': 8, 'c': '#ff0000', 'vbr': 128, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
{'fs': '320x240', 'fr': 30, 'kfr': 10, 'c': '#ff0000', 'vbr': 128, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
# Frame size change
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ff00', 'vbr': 128, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
# Bitrate change
{'fs': '320x240', 'fr': 30, 'kfr': 10, 'c': '#ff00ff', 'vbr': 256, 'abr': 0, 'asr': 0, 'ach': 0, 'afreq': 0},
## Audio-only files
# Bitrate/Codebook changes
{'fs': '0x0', 'fr': 0, 'kfr': 0, 'c': '#000000', 'vbr': 0, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
{'fs': '0x0', 'fr': 0, 'kfr': 0, 'c': '#000000', 'vbr': 0, 'abr': 192, 'asr': 44100, 'ach': 1, 'afreq': 4000},
## Audio-Video files
# Frame size change.
{'fs': '320x240', 'fr': 30, 'kfr': 10, 'c': '#ff0000', 'vbr': 256, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ff00', 'vbr': 256, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
# Audio bitrate change.
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ff00', 'vbr': 256, 'abr': 192, 'asr': 44100, 'ach': 1, 'afreq': 4000},
# Video bitrate change.
{'fs': '640x480', 'fr': 30, 'kfr': 10, 'c': '#00ffff', 'vbr': 512, 'abr': 128, 'asr': 44100, 'ach': 1, 'afreq': 2000},
]
CONFIG_CHANGE_TESTS = [
["v-framerate", 0, 1, "Tests %s video-only frame rate changes."],
["v-framesize", 1, 2, "Tests %s video-only frame size changes."],
["v-bitrate", 1, 3, "Tests %s video-only bitrate changes."],
["a-bitrate", 4, 5, "Tests %s audio-only bitrate changes."],
["av-framesize", 6, 7, "Tests %s frame size changes in multiplexed content."],
["av-audio-bitrate", 7, 8, "Tests %s audio bitrate changes in multiplexed content."],
["av-video-bitrate", 7, 9, "Tests %s video bitrate changes in multiplexed content."]
]
CODEC_INFO = {
"mp4": {"audio": "mp4a.40.2", "video": "avc1.4D4001"},
"webm": {"audio": "vorbis", "video": "vp8"}
}
HTML_TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<script src="/w3c/resources/testharness.js"></script>
<script src="/w3c/resources/testharnessreport.js"></script>
<script src="mediasource-util.js"></script>
<script src="mediasource-config-changes.js"></script>
</head>
<body>
<div id="log"></div>
<script>
mediaSourceConfigChangeTest("%(media_format)s", "%(idA)s", "%(idB)s", "%(description)s");
</script>
</body>
</html>
"""
def run(cmd_line):
os.system(" ".join(cmd_line))
def generate_manifest(filename, media_filename, media_format, has_audio, has_video):
major_type = "audio"
if has_video:
major_type = "video"
codecs = []
if has_video:
codecs.append(CODEC_INFO[media_format]["video"])
if has_audio:
codecs.append(CODEC_INFO[media_format]["audio"])
mimetype = "%s/%s;codecs=\"%s\"" % (major_type, media_format, ",".join(codecs))
manifest = { 'url': media_filename, 'type': mimetype}
f = open(filename, "wb")
f.write(json.dumps(manifest, indent=4, separators=(',', ': ')))
f.close()
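# For reference, the manifest written above for a muxed webm encoding would
# look roughly like this (the exact filename depends on the encode settings):
#
#   {
#       "url": "webm/test-av-384k-44100Hz-1ch-320x240-30fps-10kfr.webm",
#       "type": "video/webm;codecs=\"vp8,vorbis\""
#   }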
def generate_test_html(media_format, config_change_tests, encoding_ids):
for test_info in config_change_tests:
filename = "../../media-source/mediasource-config-change-%s-%s.html" % (media_format, test_info[0])
html = HTML_TEMPLATE % {'media_format': media_format,
'idA': encoding_ids[test_info[1]],
'idB': encoding_ids[test_info[2]],
'description': test_info[3] % (media_format)}
f = open(filename, "wb")
f.write(html)
f.close()
def main():
encoding_ids = []
for media_format in MEDIA_FORMATS:
run(["mkdir ", media_format])
for settings in ENCODE_SETTINGS:
video_bitrate = settings['vbr']
has_video = (video_bitrate > 0)
audio_bitrate = settings['abr']
has_audio = (audio_bitrate > 0)
bitrate = video_bitrate + audio_bitrate
frame_size = settings['fs']
frame_rate = settings['fr']
keyframe_rate = settings['kfr']
color = settings['c']
sample_rate = settings['asr']
channels = settings['ach']
frequency = settings['afreq']
cmdline = ["ffmpeg", "-y"]
id_prefix = ""
id_params = ""
if has_audio:
id_prefix += "a"
id_params += "-%sHz-%sch" % (sample_rate, channels)
channel_layout = "FC"
sin_func = "sin(%s*2*PI*t)" % frequency
func = sin_func
if channels == 2:
channel_layout += "|BC"
func += "|" + sin_func
cmdline += ["-f", "lavfi", "-i", "aevalsrc=\"%s:s=%s:c=%s:d=%s\"" % (func, sample_rate, channel_layout, DURATION)]
if has_video:
id_prefix += "v"
id_params += "-%s-%sfps-%skfr" % (frame_size, frame_rate, keyframe_rate)
cmdline += ["-f", "lavfi", "-i", "color=%s:duration=%s:size=%s:rate=%s" % (color, DURATION, frame_size, frame_rate)]
if has_audio:
cmdline += ["-b:a", "%sk" % audio_bitrate]
if has_video:
cmdline += ["-b:v", "%sk" % video_bitrate]
cmdline += ["-keyint_min", "%s" % keyframe_rate]
cmdline += ["-g", "%s" % keyframe_rate]
textOverlayInfo = "'drawtext=fontfile=Mono:fontsize=32:text=Time\\\\:\\\\ %{pts}"
textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=32:text=Size\\\\:\\\\ %s" % (frame_size)
textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=64:text=Bitrate\\\\:\\\\ %s" % (bitrate)
textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=96:text=FrameRate\\\\:\\\\ %s" % (frame_rate)
textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=128:text=KeyFrameRate\\\\:\\\\ %s" % (keyframe_rate)
if has_audio:
textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=160:text=SampleRate\\\\:\\\\ %s" % (sample_rate)
textOverlayInfo += ",drawtext=fontfile=Mono:fontsize=32:y=192:text=Channels\\\\:\\\\ %s" % (channels)
textOverlayInfo += "'"
cmdline += ["-vf", textOverlayInfo]
encoding_id = "%s-%sk%s" % (id_prefix, bitrate, id_params)
if len(encoding_ids) < len(ENCODE_SETTINGS):
encoding_ids.append(encoding_id)
filename_base = "%s/test-%s" % (media_format, encoding_id)
media_filename = filename_base + "." + media_format
manifest_filename = filename_base + "-manifest.json"
cmdline.append(media_filename)
run(cmdline)
# Remux file so it conforms to MSE bytestream requirements.
if media_format == "webm":
tmp_filename = media_filename + ".tmp"
run(["mse_webm_remuxer", media_filename, tmp_filename])
run(["mv", tmp_filename, media_filename])
elif media_format == "mp4":
run(["MP4Box", "-dash", "250", "-rap", media_filename])
run(["mv", filename_base + "_dash.mp4", media_filename])
run(["rm", filename_base + "_dash.mpd"])
generate_manifest(manifest_filename, media_filename, media_format, has_audio, has_video)
generate_test_html(media_format, CONFIG_CHANGE_TESTS, encoding_ids)
if '__main__' == __name__:
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AutoProvisioningSettingsOperations(object):
"""AutoProvisioningSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AutoProvisioningSettingList"]
"""Exposes the auto provisioning settings of the subscriptions.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AutoProvisioningSettingList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.security.models.AutoProvisioningSettingList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AutoProvisioningSettingList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AutoProvisioningSettingList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/autoProvisioningSettings'} # type: ignore
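    # Illustrative usage sketch (client construction not shown;
    # `security_client` is a hypothetical SecurityCenter client instance):
    #
    #   for setting in security_client.auto_provisioning_settings.list():
    #       print(setting.name, setting.auto_provision)
    #
    # ItemPaged lazily follows the nextLink pages via get_next/extract_data.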
def get(
self,
setting_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AutoProvisioningSetting"
"""Details of a specific setting.
:param setting_name: Auto provisioning setting key.
:type setting_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AutoProvisioningSetting, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.AutoProvisioningSetting
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AutoProvisioningSetting"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'settingName': self._serialize.url("setting_name", setting_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AutoProvisioningSetting', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/autoProvisioningSettings/{settingName}'} # type: ignore
def create(
self,
setting_name, # type: str
setting, # type: "_models.AutoProvisioningSetting"
**kwargs # type: Any
):
# type: (...) -> "_models.AutoProvisioningSetting"
"""Details of a specific setting.
:param setting_name: Auto provisioning setting key.
:type setting_name: str
        :param setting: The auto provisioning setting to apply to the subscription.
:type setting: ~azure.mgmt.security.models.AutoProvisioningSetting
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AutoProvisioningSetting, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.AutoProvisioningSetting
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AutoProvisioningSetting"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'settingName': self._serialize.url("setting_name", setting_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(setting, 'AutoProvisioningSetting')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AutoProvisioningSetting', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/autoProvisioningSettings/{settingName}'} # type: ignore
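# Illustrative usage sketch (kept as a comment so nothing executes at import
# time). The client class name, its constructor arguments and the model field
# below are assumptions based on the azure-mgmt-security package layout and
# may differ between package versions.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.security import SecurityCenter
#   from azure.mgmt.security import models as security_models
#
#   client = SecurityCenter(DefaultAzureCredential(), "<subscription-id>")
#   for setting in client.auto_provisioning_settings.list():
#       print(setting.name, setting.auto_provision)
#   client.auto_provisioning_settings.create(
#       setting_name="default",
#       setting=security_models.AutoProvisioningSetting(auto_provision="On"))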
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
import datetime
import bson
import six
from six.moves import http_client
from tests import FunctionalTest
from st2tests.fixtures import executions as fixture
from st2tests.fixtures import history_views
from st2common.util import isotime
from st2common.util import date as date_utils
from st2api.controllers.v1.actionexecutions import ActionExecutionsController
from st2common.persistence.execution import ActionExecution
from st2common.models.api.execution import ActionExecutionAPI
class TestActionExecutionFilters(FunctionalTest):
@classmethod
    def tearDownClass(cls):
pass
@classmethod
def setUpClass(cls):
super(TestActionExecutionFilters, cls).setUpClass()
cls.dt_base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
cls.num_records = 100
cls.refs = {}
cls.fake_types = [
{
'trigger': copy.deepcopy(fixture.ARTIFACTS['trigger']),
'trigger_type': copy.deepcopy(fixture.ARTIFACTS['trigger_type']),
'trigger_instance': copy.deepcopy(fixture.ARTIFACTS['trigger_instance']),
'rule': copy.deepcopy(fixture.ARTIFACTS['rule']),
'action': copy.deepcopy(fixture.ARTIFACTS['actions']['chain']),
'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['action-chain']),
'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['workflow']),
'children': []
},
{
'action': copy.deepcopy(fixture.ARTIFACTS['actions']['local']),
'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['run-local']),
'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['task1'])
}
]
def assign_parent(child):
            candidates = [v for k, v in six.iteritems(cls.refs) if v.action['name'] == 'chain']
if candidates:
parent = random.choice(candidates)
child['parent'] = str(parent.id)
parent.children.append(child['id'])
cls.refs[str(parent.id)] = ActionExecution.add_or_update(parent)
for i in range(cls.num_records):
obj_id = str(bson.ObjectId())
timestamp = cls.dt_base + datetime.timedelta(seconds=i)
fake_type = random.choice(cls.fake_types)
data = copy.deepcopy(fake_type)
data['id'] = obj_id
data['start_timestamp'] = isotime.format(timestamp, offset=False)
data['end_timestamp'] = isotime.format(timestamp, offset=False)
data['status'] = data['liveaction']['status']
data['result'] = data['liveaction']['result']
if fake_type['action']['name'] == 'local' and random.choice([True, False]):
assign_parent(data)
wb_obj = ActionExecutionAPI(**data)
db_obj = ActionExecutionAPI.to_model(wb_obj)
cls.refs[obj_id] = ActionExecution.add_or_update(db_obj)
def test_get_all(self):
response = self.app.get('/v1/executions')
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), self.num_records)
self.assertEqual(response.headers['X-Total-Count'], str(self.num_records))
ids = [item['id'] for item in response.json]
self.assertListEqual(sorted(ids), sorted(self.refs.keys()))
def test_get_all_exclude_attributes(self):
# No attributes excluded
response = self.app.get('/v1/executions?action=core.local&limit=1')
self.assertEqual(response.status_int, 200)
self.assertTrue('result' in response.json[0])
# Exclude "result" attribute
path = '/v1/executions?action=core.local&limit=1&exclude_attributes=result'
response = self.app.get(path)
self.assertEqual(response.status_int, 200)
self.assertFalse('result' in response.json[0])
def test_get_one(self):
        obj_id = random.choice(list(self.refs.keys()))
response = self.app.get('/v1/executions/%s' % obj_id)
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, dict)
record = response.json
fake_record = ActionExecutionAPI.from_model(self.refs[obj_id])
self.assertEqual(record['id'], obj_id)
self.assertDictEqual(record['action'], fake_record.action)
self.assertDictEqual(record['runner'], fake_record.runner)
self.assertDictEqual(record['liveaction'], fake_record.liveaction)
def test_get_one_failed(self):
response = self.app.get('/v1/executions/%s' % str(bson.ObjectId()),
expect_errors=True)
self.assertEqual(response.status_int, http_client.NOT_FOUND)
def test_limit(self):
limit = 10
refs = [k for k, v in six.iteritems(self.refs) if v.action['name'] == 'chain']
response = self.app.get('/v1/executions?action=core.chain&limit=%s' %
limit)
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), limit)
self.assertEqual(response.headers['X-Limit'], str(limit))
self.assertEqual(response.headers['X-Total-Count'], str(len(refs)), response.json)
ids = [item['id'] for item in response.json]
self.assertListEqual(list(set(ids) - set(refs)), [])
def test_query(self):
refs = [k for k, v in six.iteritems(self.refs) if v.action['name'] == 'chain']
response = self.app.get('/v1/executions?action=core.chain')
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), len(refs))
self.assertEqual(response.headers['X-Total-Count'], str(len(refs)))
ids = [item['id'] for item in response.json]
self.assertListEqual(sorted(ids), sorted(refs))
def test_filters(self):
excludes = ['parent', 'timestamp', 'action', 'liveaction', 'timestamp_gt',
'timestamp_lt', 'status']
for param, field in six.iteritems(ActionExecutionsController.supported_filters):
if param in excludes:
continue
value = self.fake_types[0]
for item in field.split('.'):
value = value[item]
response = self.app.get('/v1/executions?%s=%s' % (param, value))
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertGreater(len(response.json), 0)
self.assertGreater(int(response.headers['X-Total-Count']), 0)
def test_parent(self):
refs = [v for k, v in six.iteritems(self.refs)
if v.action['name'] == 'chain' and v.children]
self.assertTrue(refs)
ref = random.choice(refs)
response = self.app.get('/v1/executions?parent=%s' % str(ref.id))
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), len(ref.children))
self.assertEqual(response.headers['X-Total-Count'], str(len(ref.children)))
ids = [item['id'] for item in response.json]
self.assertListEqual(sorted(ids), sorted(ref.children))
def test_parentless(self):
refs = {k: v for k, v in six.iteritems(self.refs) if not getattr(v, 'parent', None)}
self.assertTrue(refs)
self.assertNotEqual(len(refs), self.num_records)
response = self.app.get('/v1/executions?parent=null')
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), len(refs))
self.assertEqual(response.headers['X-Total-Count'], str(len(refs)))
ids = [item['id'] for item in response.json]
self.assertListEqual(sorted(ids), sorted(refs.keys()))
def test_pagination(self):
retrieved = []
page_size = 10
        page_count = self.num_records // page_size
for i in range(page_count):
offset = i * page_size
response = self.app.get('/v1/executions?offset=%s&limit=%s' % (
offset, page_size))
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), page_size)
self.assertEqual(response.headers['X-Limit'], str(page_size))
self.assertEqual(response.headers['X-Total-Count'], str(self.num_records))
ids = [item['id'] for item in response.json]
self.assertListEqual(list(set(ids) - set(self.refs.keys())), [])
self.assertListEqual(sorted(list(set(ids) - set(retrieved))), sorted(ids))
retrieved += ids
self.assertListEqual(sorted(retrieved), sorted(self.refs.keys()))
def test_datetime_range(self):
dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
response = self.app.get('/v1/executions?timestamp=%s' % dt_range)
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), 10)
self.assertEqual(response.headers['X-Total-Count'], '10')
dt1 = response.json[0]['start_timestamp']
dt2 = response.json[9]['start_timestamp']
self.assertLess(isotime.parse(dt1), isotime.parse(dt2))
dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
response = self.app.get('/v1/executions?timestamp=%s' % dt_range)
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
self.assertEqual(len(response.json), 10)
self.assertEqual(response.headers['X-Total-Count'], '10')
dt1 = response.json[0]['start_timestamp']
dt2 = response.json[9]['start_timestamp']
self.assertLess(isotime.parse(dt2), isotime.parse(dt1))
def test_default_sort(self):
response = self.app.get('/v1/executions')
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, list)
dt1 = response.json[0]['start_timestamp']
dt2 = response.json[len(response.json) - 1]['start_timestamp']
self.assertLess(isotime.parse(dt2), isotime.parse(dt1))
def test_filters_view(self):
response = self.app.get('/v1/executions/views/filters')
self.assertEqual(response.status_int, 200)
self.assertIsInstance(response.json, dict)
for key, value in six.iteritems(history_views.ARTIFACTS['filters']):
self.assertEqual(set(response.json[key]), set(value))
|
|
'''
Author: Andres Andreu < andres at neurofuzzsecurity dot com >
Company: neuroFuzz, LLC
Date: 7/21/2016
Last Modified: 08/18/2018
neurofuzz security SSH config hardening
###### LICENSE ###########
BSD 3-Clause License
Copyright (c) 2016 - 2018, Andres Andreu, neuroFuzz LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
###### LICENSE ###########
This program must be run as root or with sudo since it will write
the end result file to a privileged dir: /etc/ssh
This process will write an SSH config file that is based
on whatever existed on the system. Final output will be to:
/etc/ssh/neurofuzzsecurity_sshd_config
so the standard SSH will have to be stopped and a new instance
will have to be started as such:
/usr/sbin/sshd -D -f /etc/ssh/neurofuzzsecurity_sshd_config
This will run the OpenSSH server on port 6446 unless you
decide to change that.
If you want to just use this output file as your default
then just mv
/etc/ssh/neurofuzzsecurity_sshd_config
to
/etc/ssh/sshd_config
example:
sudo mv /etc/ssh/neurofuzzsecurity_sshd_config /etc/ssh/sshd_config
and restart the SSH service as such:
sudo service ssh restart
A backup of your current sshd_config file gets put
in:
/root/.sshd_config_backups/
with a timestamp appended to the filename. This location
is used because this is run as a privileged user
To run:
sudo python3 nftk_modify_sshd_config.py
Notes:
- Take note that by default we set SSH to listen on port 6446,
if you want to change this value change it in var SSHD_PORT
- Prior to running this program and altering the target sshd_config
  file, you need to copy the public side of your SSH keys to that
  target machine
TODOs:
- read user data (for the AllowUsers setting) from file(s)
'''
import os
import sys
import time
import shutil
import optparse
import subprocess
import platform
import syslog
from pathlib import Path
#################################################################
# populate ALLOWED_USERS as needed
ALLOWED_USERS = []
SSHD_PORT = 6446
#################################################################
def which(program=""):
''' find location (path) of executable code '''
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
tarr = os.environ["PATH"].split(os.pathsep)
tarr.append("/sbin")
for path in tarr:
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
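# Example (illustrative): which(program="sshd") typically returns a path such
# as "/usr/sbin/sshd" on Debian-based systems, or None when the executable
# cannot be located on PATH or in /sbin.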
#################################################################
class ssh_config(object):
def __init__(self, sshd_config_file=''):
if sshd_config_file:
self.sshd_config_file = sshd_config_file
else:
self.sshd_config_file = "/etc/ssh/sshd_config"
self.neurofuzzsecurity_sshd_config_file = "/etc/ssh/neurofuzzsecurity_sshd_config"
self.backup_original_ssh_config_file()
self.usedns_exists = False
self.rhosts_auth_exists = False
        try:
            self.platform_string = platform.linux_distribution()[0].lower()
        except AttributeError:
            # platform.linux_distribution() was removed in Python 3.8; fall back
            # to a coarse platform name so the Debian-specific tweak is skipped.
            self.platform_string = platform.system().lower()
self.sshd_exe = which(program="sshd")
self.raw_lines = []
self.allowed_users = []
# read in sshd_config data
self.consume_ssh_config_file()
# default users that need SSH access
if len(ALLOWED_USERS) > 0:
for u in ALLOWED_USERS:
self.add_allowed_user(uname=u)
def backup_original_ssh_config_file(self):
        ''' back up the current sshd_config into ~/.sshd_config_backups with a timestamp suffix '''
dest_path = "{}/{}".format(str(Path.home()),".sshd_config_backups")
if not os.path.exists(dest_path):
os.makedirs(dest_path)
'''
if not os.path.exists(os.path.dirname(dest_path)):
print("Please make dir: {} - example: {}".format(dest_path, "mkdir ~/.sshd_config_backup"))
sys.exit()
'''
        raw_fname = os.path.basename(self.sshd_config_file)
        shutil.copy(self.sshd_config_file, "{}/{}.backup.{}".format(dest_path, raw_fname, str(int(time.time()))))
def consume_ssh_config_file(self):
''' read in ssh config data for us to modify '''
with open(self.sshd_config_file, "r") as f:
self.raw_lines = f.readlines()
def write_ssh_config_file(self):
        ''' write the hardened configuration out to the neurofuzzsecurity sshd config path '''
if len(self.raw_lines) > 0:
with open(self.neurofuzzsecurity_sshd_config_file, "w") as f:
f.write(self.dump_modified_config())
def dump_modified_config(self):
return ''.join(self.raw_lines).strip()
def harden_ssh_config(self):
if len(self.raw_lines) > 0:
for index,item in enumerate(self.raw_lines):
#print "{} - {}".format(index,item)
if item.startswith('Port') or item.startswith('#Port'):
self.raw_lines[index] = "{} {}\n".format("Port", SSHD_PORT)
if item.startswith('Protocol') or item.startswith('#Protocol'):
self.raw_lines[index] = "{}\n".format("Protocol 2")
if item.startswith('ServerKeyBits') or item.startswith('#ServerKeyBits'):
self.raw_lines[index] = "{}\n".format("ServerKeyBits 2048")
if item.startswith('PermitRootLogin') or item.startswith('#PermitRootLogin'):
self.raw_lines[index] = "{}\n".format("PermitRootLogin no")
if item.startswith('StrictModes') or item.startswith('#StrictModes'):
self.raw_lines[index] = "{}\n".format("StrictModes yes")
if item.startswith('RSAAuthentication') or item.startswith('#RSAAuthentication'):
self.raw_lines[index] = "{}\n".format("RSAAuthentication yes")
if item.startswith('PubkeyAuthentication') or item.startswith('#PubkeyAuthentication'):
self.raw_lines[index] = "{}\n".format("PubkeyAuthentication yes")
if item.startswith('RhostsRSAAuthentication') or item.startswith('#RhostsRSAAuthentication'):
self.raw_lines[index] = "{}\n".format("RhostsRSAAuthentication no")
if item.startswith('RhostsAuthentication') or item.startswith('#RhostsAuthentication'):
self.raw_lines[index] = "{}\n".format("RhostsAuthentication no")
self.rhosts_auth_exists = True
if item.startswith('IgnoreRhosts') or item.startswith('#IgnoreRhosts'):
self.raw_lines[index] = "{}\n".format("IgnoreRhosts yes")
if item.startswith('IgnoreUserKnownHosts') or item.startswith('#IgnoreUserKnownHosts'):
self.raw_lines[index] = "{}\n".format("IgnoreUserKnownHosts yes")
if item.startswith('PasswordAuthentication') or item.startswith('#PasswordAuthentication'):
self.raw_lines[index] = "{}\n".format("PasswordAuthentication no")
if item.startswith('PermitEmptyPasswords') or item.startswith('#PermitEmptyPasswords'):
self.raw_lines[index] = "{}\n".format("PermitEmptyPasswords no")
if item.startswith('UsePAM') or item.startswith('#UsePAM'):
self.raw_lines[index] = "{}\n".format("UsePAM yes")
if item.startswith('ChallengeResponseAuthentication') or item.startswith('#ChallengeResponseAuthentication'):
self.raw_lines[index] = "{}\n".format("ChallengeResponseAuthentication no")
if item.startswith('KerberosAuthentication') or item.startswith('#KerberosAuthentication'):
self.raw_lines[index] = "{}\n".format("KerberosAuthentication no")
if item.startswith('GSSAPIAuthentication') or item.startswith('#GSSAPIAuthentication'):
self.raw_lines[index] = "{}\n".format("GSSAPIAuthentication no")
if item.startswith('AllowTcpForwarding') or item.startswith('#AllowTcpForwarding'):
self.raw_lines[index] = "{}\n".format("AllowTcpForwarding no")
if item.startswith('X11Forwarding') or item.startswith('#X11Forwarding'):
self.raw_lines[index] = "{}\n".format("X11Forwarding no")
if item.startswith('PrintMotd') or item.startswith('#PrintMotd'):
self.raw_lines[index] = "{}\n".format("PrintMotd no")
if item.startswith('GatewayPorts') or item.startswith('#GatewayPorts'):
self.raw_lines[index] = "{}\n".format("GatewayPorts no")
if item.startswith('TCPKeepAlive') or item.startswith('#TCPKeepAlive'):
self.raw_lines[index] = "{}\n".format("TCPKeepAlive yes")
if item.startswith('PermitUserEnvironment') or item.startswith('#PermitUserEnvironment'):
self.raw_lines[index] = "{}\n".format("PermitUserEnvironment no")
if item.startswith('UsePrivilegeSeparation') or item.startswith('#UsePrivilegeSeparation'):
self.raw_lines[index] = "{}\n".format("UsePrivilegeSeparation yes")
if item.startswith('Banner') or item.startswith('#Banner'):
self.raw_lines[index] = "{}\n".format("Banner none")
if item.startswith('UseDNS') or item.startswith('#UseDNS'):
self.raw_lines[index] = "{}\n".format("UseDNS no")
#USEDNS_EXISTS = True
self.usedns_exists = True
if item.strip().endswith('sftp-server'):
'''
some use spaces, others use tabs
examples:
Subsystem sftp /usr/lib/openssh/sftp-server
Subsystem sftp /usr/libexec/sftp-server
Subsystem sftp /usr/lib/sftp-server
'''
if '\t' in item:
tmp_item = item.strip().split('\t')
else:
tmp_item = item.strip().split()
sftp_name = tmp_item[len(tmp_item) - 1]
self.raw_lines[index] = "{} {}\n".format("#Subsystem sftp", sftp_name)
if not self.usedns_exists:
self.raw_lines.append("{}\n".format("UseDNS no"))
'''
# looks like this is deprecated
if not self.rhosts_auth_exists:
self.raw_lines.append("{}\n".format("RhostsAuthentication no"))
'''
if self.platform_string == 'debian':
self.raw_lines.append("{}\n".format("DebianBanner no"))
self.raw_lines.append("\n{}\n".format("KexAlgorithms [email protected],diffie-hellman-group-exchange-sha256"))
self.raw_lines.append("\n{}\n".format("Ciphers [email protected],[email protected],[email protected],aes256-ctr,aes192-ctr,aes128-ctr"))
self.raw_lines.append("\n{}\n".format("MACs [email protected],[email protected],[email protected],hmac-sha2-512,hmac-sha2-256,[email protected]"))
if len(self.allowed_users) > 0:
# add default users
# AllowUsers name1,name2
self.raw_lines.append("\n{} {}".format("AllowUsers", ' '.join(self.allowed_users)))
def add_allowed_user(self, uname):
if uname and uname not in self.allowed_users:
self.allowed_users.append(uname)
def validate_sshd_config(self):
        ret = True
        if not self.sshd_exe:
            # sshd executable was not found by which(); cannot run "sshd -t"
            return False
proc = subprocess.Popen([self.sshd_exe, '-t', '-f', self.neurofuzzsecurity_sshd_config_file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out,err = proc.communicate()
'''
if out:
print("OUT: {}".format(out))
'''
if err:
#print("ERR: {}".format(err))
ret = False
return ret
'''
API
'''
def neurofuzzsecurity_generate_hardened_ssh_config():
'''
returns True if the newly generated SSHD config is validated
successfully, otherwise returns False
'''
sshdcfg = ssh_config()
sshdcfg.harden_ssh_config()
sshdcfg.write_ssh_config_file()
return sshdcfg.validate_sshd_config()
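# Illustrative sketch (kept as a comment; the path and user name below are
# hypothetical). The same hardening can be applied to a non-default config
# file, with an explicit AllowUsers entry added before writing:
#
#   sshdcfg = ssh_config(sshd_config_file="/etc/ssh/sshd_config.custom")
#   sshdcfg.add_allowed_user(uname="deploy")
#   sshdcfg.harden_ssh_config()
#   sshdcfg.write_ssh_config_file()
#   print(sshdcfg.validate_sshd_config())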
if __name__ == "__main__":
print(neurofuzzsecurity_generate_hardened_ssh_config())
'''
Research @ https://stribika.github.io/2015/01/04/secure-secure-shell.html
Notes:
###############################################################################
Key Exchange Algorithms
OpenSSH supports 8 key exchange protocols:
curve25519-sha256: ECDH over Curve25519 with SHA2
diffie-hellman-group1-sha1: 1024 bit DH with SHA1
diffie-hellman-group14-sha1: 2048 bit DH with SHA1
diffie-hellman-group-exchange-sha1: Custom DH with SHA1
diffie-hellman-group-exchange-sha256: Custom DH with SHA2
ecdh-sha2-nistp256: ECDH over NIST P-256 with SHA2
ecdh-sha2-nistp384: ECDH over NIST P-384 with SHA2
ecdh-sha2-nistp521: ECDH over NIST P-521 with SHA2
We have to look at 3 things here:
1. ECDH curve choice: This eliminates 6-8 because NIST curves suck. They leak secrets through timing side channels and off-curve inputs. Also, NIST is considered harmful and cannot be trusted.
2. Bit size of the DH modulus: This eliminates 2 because the NSA has supercomputers and possibly unknown attacks. 1024 bits simply don't offer sufficient security margin.
3. Security of the hash function: This eliminates 2-4 because SHA1 is broken. We don't have to wait for a second preimage attack that takes 10 minutes on a cellphone to disable it right now.
We are left with 1 and 5. 1 is better and it's perfectly OK to only support that but for interoperability (with Eclipse, WinSCP), 5 can be included.
Hence we add:
KexAlgorithms [email protected],diffie-hellman-group-exchange-sha256
###############################################################################
Data encryption:
Symmetric ciphers are used to encrypt the data after the initial key exchange and authentication is complete.
Here we have quite a few algorithms:
3des-cbc
aes128-cbc
aes192-cbc
aes256-cbc
aes128-ctr
aes192-ctr
aes256-ctr
[email protected]
[email protected]
arcfour
arcfour128
arcfour256
blowfish-cbc
cast128-cbc
[email protected]
We have to consider the following:
1. Security of the cipher algorithm: This eliminates 1 and 10-12 - both DES and RC4 are broken. Again, no need to wait for them to become even weaker, disable them now.
2. Key size: At least 128 bits, the more the better.
3. Block size: Does not apply to stream ciphers. At least 128 bits. This eliminates 13 and 14 because those have a 64 bit block size.
4. Cipher mode: The recommended approach here is to prefer AE modes and optionally allow CTR for compatibility. CTR with Encrypt-then-MAC is provably secure.
Chacha20-poly1305 is preferred over AES-GCM because the SSH protocol does not encrypt message sizes when GCM (or EtM) is in use. This allows some traffic analysis even without decrypting the data.
Hence we add:
Ciphers [email protected],[email protected],[email protected],aes256-ctr,aes192-ctr,aes128-ctr
###############################################################################
Message Authentication Codes
Encryption provides confidentiality, message authentication code provides integrity. We need both. If an AE cipher mode is selected, then extra MACs are not used, the integrity is already given. If CTR is selected, then we need a MAC to calculate and attach a tag to every message.
There are multiple ways to combine ciphers and MACs - not all of these are useful. The 3 most common:
Encrypt-then-MAC: encrypt the message, then attach the MAC of the ciphertext.
MAC-then-encrypt: attach the MAC of the plaintext, then encrypt everything.
Encrypt-and-MAC: encrypt the message, then attach the MAC of the plaintext.
Only Encrypt-then-MAC should be used, period. Using MAC-then-encrypt has led to many attacks on TLS, while Encrypt-and-MAC has led to not quite as many attacks on SSH. The reason for this is that the more you fiddle with an attacker-provided message, the more chance the attacker has to gain information through side channels. In the case of Encrypt-then-MAC, the MAC is verified and, if incorrect, discarded. Boom, one step, no timing channels. In the case of MAC-then-encrypt, the attacker-provided message first has to be decrypted and only then can you verify it. Decryption failure (due to invalid CBC padding, for example) may take less time than verification failure. Encrypt-and-MAC also has to be decrypted first, leading to the same kind of potential side channels. It's even worse because no one said that a MAC's output can't leak what its input was. SSH, by default, uses this method.
Here are the available MAC choices:
hmac-md5
hmac-md5-96
hmac-sha1
hmac-sha1-96
hmac-sha2-256
hmac-sha2-512
umac-64
umac-128
[email protected]
[email protected]
[email protected]
[email protected]
[email protected]
[email protected]
[email protected]
[email protected]
The selection considerations:
1. Security of the hash algorithm: No MD5 and SHA1. Yes, I know that HMAC-SHA1 does not need collision resistance but why wait? Disable weak crypto today.
2. Encrypt-then-MAC: I am not aware of a security proof for CTR-and-HMAC but I also don't think CTR decryption can fail. Since there are no downgrade attacks, you can add them to the end of the list. You can also do this on a host by host basis so you know which ones are less safe.
3. Tag size: At least 128 bits. This eliminates umac-64-etm.
4. Key size: At least 128 bits. This doesn't eliminate anything at this point.
Hence we add:
MACs [email protected],[email protected],[email protected],hmac-sha2-512,hmac-sha2-256,[email protected]
'''
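# Optional cross-check sketch: the algorithms pinned in harden_ssh_config()
# can be compared against what the local OpenSSH build actually supports via
# "ssh -Q kex", "ssh -Q cipher" and "ssh -Q mac". This helper is illustrative
# only and is not called anywhere in this module.
def _list_supported_ssh_algorithms(kind="kex"):
    ''' kind is one of: kex, cipher, mac '''
    proc = subprocess.Popen(["ssh", "-Q", kind],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return out.decode("utf-8", errors="replace").split()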
|
|
"""Module mimics some of the behaviors of the builtin :mod:`shutil`.
It adds logging to all operations and abstracts some other useful shell
commands (functions).
"""
import os
import io
import glob
import shlex
import shutil
import fnmatch
import logging
import contextlib
logger = logging.getLogger(__name__)
def split(s, posix=True):
"""Split the string s using shell-like syntax.
Args:
s (str): String to split
posix (bool): Use posix split
Returns:
list of str: List of string parts
"""
if isinstance(s, bytes):
s = s.decode("utf-8")
return shlex.split(s, posix=posix)
def search(path, matcher="*", dirs=False, files=True):
"""Recursive search function.
Args:
path (str): Path to search recursively
matcher (str or callable): String pattern to search for or function
that returns True/False for a file argument
        dirs (bool): If True, returns directories that match the pattern
        files (bool): If True, returns files that match the pattern
Yields:
str: Found files and directories
"""
if callable(matcher):
def fnmatcher(items):
return list(filter(matcher, items))
else:
def fnmatcher(items):
return fnmatch.filter(items, matcher)
for root, directories, filenames in os.walk(os.path.abspath(path)):
to_match = []
if dirs:
to_match.extend(directories)
if files:
to_match.extend(filenames)
for item in fnmatcher(to_match):
yield os.path.join(root, item)
def chdir(directory):
"""Change the current working directory.
Args:
directory (str): Directory to go to.
"""
directory = os.path.abspath(directory)
logger.info("chdir -> %s" % directory)
try:
if not os.path.isdir(directory):
logger.error(
"chdir -> %s failed! Directory does not exist!", directory
)
return False
os.chdir(directory)
return True
except Exception as e:
logger.error("chdir -> %s failed! %s" % (directory, e))
return False
@contextlib.contextmanager
def goto(directory, create=False):
"""Context object for changing directory.
Args:
directory (str): Directory to go to.
        create (bool): Create the directory if it doesn't exist.
Usage::
        >>> with goto(directory) as ok:
        ...     if not ok:
        ...         print('Error')
        ...     else:
        ...         print('All OK')
"""
current = os.getcwd()
directory = os.path.abspath(directory)
if os.path.isdir(directory) or (create and mkdir(directory)):
logger.info("goto -> %s", directory)
os.chdir(directory)
try:
yield True
finally:
logger.info("goto <- %s", directory)
os.chdir(current)
else:
logger.info(
"goto(%s) - directory does not exist, or cannot be " "created.",
directory,
)
yield False
def mkdir(path, mode=0o755, delete=False):
"""Make a directory.
Create a leaf directory and all intermediate ones.
Works like ``mkdir``, except that any intermediate path segment (not just
the rightmost) will be created if it does not exist. This is recursive.
Args:
path (str): Directory to create
mode (int): Directory mode
delete (bool): Delete directory/file if exists
Returns:
bool: True if succeeded else False
"""
logger.info("mkdir: %s" % path)
if os.path.isdir(path):
if not delete:
return True
if not remove(path):
return False
try:
os.makedirs(path, mode)
return True
except Exception:
logger.exception("Failed to mkdir: %s" % path)
return False
def __create_destdir(destination):
destdir = os.path.dirname(destination)
if not os.path.isdir(destdir):
if not mkdir(destdir):
raise Exception('Failed to create "%s"' % destdir)
def __copyfile(source, destination):
"""Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy(source, destination)
return True
except Exception as e:
logger.error(
"copyfile: %s -> %s failed! Error: %s", source, destination, e
)
return False
def __copyfile2(source, destination):
"""Copy data and all stat info ("cp -p source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copyfile2: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copy2(source, destination)
return True
except Exception as e:
logger.error(
"copyfile2: %s -> %s failed! Error: %s", source, destination, e
)
return False
def __copytree(source, destination, symlinks=False):
"""Copy a directory tree recursively using copy2().
The destination directory must not already exist.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
Args:
source (str): Source directory (directory to copy).
destination (str): Destination directory (where to copy).
        symlinks (bool): If True, copy symbolic links as links instead of copying the files they point to.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("copytree: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.copytree(source, destination, symlinks)
return True
except Exception as e:
logger.exception(
"copytree: %s -> %s failed! Error: %s", source, destination, e
)
return False
def copy(source, destination):
"""Copy file or directory.
Args:
source (str): Source file or directory
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
"""
if os.path.isdir(source):
return __copytree(source, destination)
else:
return __copyfile2(source, destination)
def gcopy(pattern, destination):
"""Copy all file found by glob.glob(pattern) to destination directory.
Args:
pattern (str): Glob pattern
destination (str): Path to the destination directory.
Returns:
bool: True if the operation is successful, False otherwise.
"""
for item in glob.glob(pattern):
if not copy(item, destination):
return False
return True
def move(source, destination):
"""Move a file or directory (recursively) to another location.
If the destination is on our current file system, then simply use
rename. Otherwise, copy source to the destination and then remove
source.
Args:
source (str): Source file or directory (file or directory to move).
destination (str): Destination file or directory (where to move).
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("Move: %s -> %s" % (source, destination))
try:
__create_destdir(destination)
shutil.move(source, destination)
return True
except Exception:
logger.exception("Failed to Move: %s -> %s" % (source, destination))
return False
def gmove(pattern, destination):
"""Move all file found by glob.glob(pattern) to destination directory.
Args:
pattern (str): Glob pattern
destination (str): Path to the destination directory.
Returns:
bool: True if the operation is successful, False otherwise.
"""
for item in glob.glob(pattern):
if not move(item, destination):
return False
return True
def __rmfile(path):
"""Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("rmfile: %s" % path)
try:
os.remove(path)
return True
except Exception as e:
logger.error("rmfile: %s failed! Error: %s" % (path, e))
return False
def __rmtree(path):
"""Recursively delete a directory tree.
Args:
path (str): Path to the directory that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
logger.info("rmtree: %s" % path)
try:
shutil.rmtree(path)
return True
except Exception as e:
logger.error("rmtree: %s failed! Error: %s" % (path, e))
return False
def remove(path):
"""Delete a file or directory.
Args:
path (str): Path to the file or directory that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
"""
if os.path.isdir(path):
return __rmtree(path)
else:
return __rmfile(path)
def gremove(pattern):
"""Remove all file found by glob.glob(pattern).
Args:
pattern (str): Pattern of files to remove
Returns:
bool: True if the operation is successful, False otherwise.
"""
for item in glob.glob(pattern):
if not remove(item):
return False
return True
def read(path, encoding="utf-8"):
"""Read the content of the file.
Args:
path (str): Path to the file
encoding (str): File encoding. Default: utf-8
Returns:
str: File content or empty string if there was an error
"""
try:
with io.open(path, encoding=encoding) as f:
return f.read()
except Exception as e:
logger.error("read: %s failed. Error: %s", path, e)
return ""
def touch(path, content="", encoding="utf-8", overwrite=False):
"""Create a file at the given path if it does not already exists.
Args:
path (str): Path to the file.
content (str): Optional content that will be written in the file.
encoding (str): Encoding in which to write the content.
Default: ``utf-8``
overwrite (bool): Overwrite the file if exists.
Returns:
bool: True if the operation is successful, False otherwise.
"""
path = os.path.abspath(path)
if not overwrite and os.path.exists(path):
logger.warning('touch: "%s" already exists', path)
return False
try:
logger.info("touch: %s", path)
with io.open(path, "wb") as f:
if not isinstance(content, bytes):
content = content.encode(encoding)
f.write(content)
return True
except Exception as e:
logger.error("touch: %s failed. Error: %s", path, e)
return False
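if __name__ == "__main__":
    # Minimal self-contained demo of the helpers above. This is an
    # illustrative sketch only; it works entirely inside a throwaway
    # temporary directory and removes it afterwards.
    import tempfile

    logging.basicConfig(level=logging.INFO)
    demo_root = tempfile.mkdtemp(prefix="sh_demo_")
    with goto(demo_root) as ok:
        if ok:
            mkdir("nested/dir")
            touch("nested/dir/example.txt", content="hello")
            for found in search(".", matcher="*.txt"):
                logger.info("found %s -> %r", found, read(found))
    remove(demo_root)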
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CAQL network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from caql import dual_ibp_method
from caql import dual_method
FLAGS = flags.FLAGS
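# Illustrative construction sketch (kept as a comment). The spec classes come
# from tf_agents, as stated in the CaqlNet.__init__ docstring below; the
# hyperparameter values here are placeholders, not recommendations.
#
#   from tf_agents.specs import array_spec
#   state_spec = array_spec.ArraySpec(shape=(4,), dtype=np.float32)
#   action_spec = array_spec.BoundedArraySpec(
#       shape=(1,), dtype=np.float32, minimum=-1.0, maximum=1.0)
#   net = CaqlNet(
#       session=tf.Session(), state_spec=state_spec, action_spec=action_spec,
#       hidden_layers=[32, 32], learning_rate=1e-3, learning_rate_action=1e-3,
#       learning_rate_ga=1e-2, batch_size=64,
#       action_maximization_iterations=20, name="main_q",
#       solver="gradient_ascent")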
class CaqlNet(object):
"""CAQL network class."""
def __init__(self,
session,
state_spec,
action_spec,
hidden_layers,
learning_rate,
learning_rate_action,
learning_rate_ga,
batch_size,
action_maximization_iterations,
name,
l2_loss_flag=False,
simple_lambda_flag=True,
solver=None,
sufficient_ascent_flag=False,
initial_lambda=10.0,
lambda_max=5e3):
"""Creates CAQL networks.
Args:
session: TF session.
state_spec: tf_agents.specs.array_spec.ArraySpec. Specification for state.
action_spec: tf_agents.specs.array_spec.ArraySpec. Specification for
action.
hidden_layers: list of integers. Number of hidden units for each hidden
layer.
learning_rate: float on Q function learning rate.
learning_rate_action: float on action function learning rate.
learning_rate_ga: float. Learning rate for gradient ascent optimizer.
batch_size: int on batch size for training.
action_maximization_iterations: int on CEM/gradient ascent iterations.
name: string on name of network.
l2_loss_flag: bool on using l2 loss.
simple_lambda_flag: bool on using lambda hinge loss.
solver: string on inner max optimizer. Supported optimizers are
"gradient_ascent", "cross_entropy", "ails", "mip".
sufficient_ascent_flag: bool on using sufficient ascent.
initial_lambda: float on initial lambda (only for simple_lambda_flag).
lambda_max: float on lambda upper-bound.
"""
self._session = session
self.state_spec = state_spec
self.action_spec = action_spec
self.state_dim = state_spec.shape[0]
self.action_dim = action_spec.shape[0]
self.action_max = action_spec.maximum
self.action_min = action_spec.minimum
self.hidden_layers = hidden_layers
self.learning_rate = learning_rate
self.learning_rate_action = learning_rate_action
self.learning_rate_ga = learning_rate_ga
self.batch_size = batch_size
self.action_maximization_iterations = action_maximization_iterations
self.name = name
self.lambda_max = lambda_max
if solver == "ails" or solver == "mip":
raise ValueError("AILS and MIP solvers are not supported yet.")
# define placeholders
self._state_tensor = tf.placeholder(
dtype=tf.float32, name="state_tensor", shape=(None, self.state_dim))
self._state_deviation_tensor = tf.placeholder(
dtype=tf.float32,
name="state_deviation_tensor",
shape=(None, self.state_dim))
self._action_tensor = tf.placeholder(
dtype=tf.float32, name="action_tensor", shape=(None, self.action_dim))
self._next_state_tensor = tf.placeholder(
dtype=tf.float32,
name="next_state_tensor",
shape=(None, self.state_dim))
self._reward_tensor = tf.placeholder(
dtype=tf.float32, name="reward_tensor", shape=(None, 1))
self._done_tensor = tf.placeholder(
dtype=tf.bool, name="done_tensor", shape=(None, 1))
self._discount_factor = tf.placeholder(
dtype=tf.float32, name="discounting_factor", shape=())
self._maxq_label = tf.placeholder(
dtype=tf.float32, shape=(None, 1), name="maxq_label")
self._backup_tensor = self._reward_tensor + (1.0 - tf.to_float(
self._done_tensor)) * self._discount_factor * self._maxq_label
self._true_label = tf.placeholder(
dtype=tf.float32, shape=(None, 1), name="true_label")
self.q_function_network = self._build_q_function_net(
self._state_tensor, self._action_tensor)
self.state_perturbed_q_function_network = self.q_function_network \
+ tf.expand_dims(tf.einsum("ij,ij->i",
tf.gradients(self.q_function_network,
self._state_tensor)[0],
self._state_deviation_tensor),
axis=-1)
self._td_rmse = tf.sqrt(
tf.losses.mean_squared_error(
self._reward_tensor + (1.0 - tf.to_float(self._done_tensor)) *
self._discount_factor * self._maxq_label, self.q_function_network))
if simple_lambda_flag:
with tf.variable_scope("{}_{}".format(self.name, "lambda_function")):
lambda_var = tf.Variable(
initial_value=initial_lambda, trainable=True, name="lambda_var")
self.lambda_function_network = tf.tile(
tf.reshape(
tf.minimum(
lambda_max, tf.maximum(0.0, lambda_var),
name="lambda_proj"), (-1, 1)), (self.batch_size, 1))
else:
self.lambda_function_network = self._build_lambda_function_net(
self._state_tensor, self._action_tensor)
# define loss
if l2_loss_flag:
self._q_function_loss = tf.losses.mean_squared_error(
self._true_label, self.q_function_network)
else:
self._q_function_loss = tf.reduce_mean(
self.q_function_network + self.lambda_function_network *
tf.maximum(0.0, self._true_label - self.q_function_network))
self._lambda_function_loss = tf.reduce_mean(
-self.lambda_function_network *
(self._true_label - self.q_function_network))
# Action network to learn argmax of Q
self._best_q_label = tf.placeholder(
dtype=tf.float32, shape=(None, 1), name="best_q_label")
# create network placeholders
self._create_network_var_ph()
self.action_function_network = self._build_action_function_net(
self._state_tensor)
self.dummy_q_function_network = self._build_q_function_net(
self._state_tensor, self.action_function_network)
self._action_function_loss = tf.losses.mean_squared_error(
self._best_q_label, self.dummy_q_function_network)
# optimizer
    # NOTE: Increment this by one by including it only in the main_q trainer.
global_step = tf.Variable(
0, name="{}_global_step".format(self.name), trainable=False)
with tf.variable_scope("{}_{}".format(self.name, "optimizer")):
self._action_function_optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(
self._action_function_loss,
var_list=tf.trainable_variables("{}_{}".format(
self.name, "action_function")))
self._q_function_optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(
self._q_function_loss,
global_step=global_step,
var_list=tf.trainable_variables("{}_{}".format(
self.name, "q_function")))
self._lambda_function_optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(
self._lambda_function_loss,
var_list=tf.trainable_variables("{}_{}".format(
self.name, "lambda_function")))
# Tensors for dual solvers
self._create_dual_maxq_label_tensor()
self._create_dual_active_constraint_condition_tensor()
self.solver = solver
self.sufficient_ascent_flag = sufficient_ascent_flag
def _create_network_var_ph(self):
"""Create network variable placeholders."""
self._dummy_network_var_ph = {}
self._vars_tf = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope="{}_{}".format(self.name, "q_function"))
for _, var in enumerate(self._vars_tf):
# define placeholder for weights
self._dummy_network_var_ph["{}_ph".format(var.name)] = tf.placeholder(
dtype=tf.float32, shape=var.shape)
def _create_cross_entropy_action_tensors(self,
num_samples=200,
top_k_portion=0.5):
"""Create tensorflow operations for cross_entropy max_actions."""
top_k_num = int(top_k_portion * num_samples)
self._dynamic_batch_size = tf.placeholder(
dtype=tf.int32, name="dynamic_batch_size")
self._action_init_tensor = tf.placeholder(
dtype=tf.float32,
name="action_init_tensor",
shape=(None, self.action_dim))
self._tolerance_tensor = tf.placeholder(
dtype=tf.float32, name="tolerance_tensor", shape=())
sample_mean_init = self._action_init_tensor
sample_covariance_diag_init = tf.ones_like(self._action_init_tensor)
top_k_value_init = tf.constant(
[np.inf]) * tf.ones(shape=(self._dynamic_batch_size, 1))
top_k_action_samples_init = tf.tile(
tf.expand_dims(tf.zeros_like(self._action_init_tensor), axis=1),
[1, top_k_num, 1])
random_sampler = tfp.distributions.MultivariateNormalDiag(
loc=np.zeros(self.action_dim), scale_diag=np.ones(self.action_dim))
def cond_cross_entropy(itr, cond_terminate, sample_mean,
sample_covariance_diag, top_k_value,
top_k_action_samples):
del sample_mean, sample_covariance_diag, top_k_value, top_k_action_samples
cond_1 = tf.math.less(itr, self.action_maximization_iterations)
return tf.math.logical_and(cond_1, tf.logical_not(cond_terminate))
def body_cross_entropy(itr, cond_terminate, sample_mean,
sample_covariance_diag, top_k_value,
top_k_action_samples):
"""Function for cross entropy search of actions."""
del top_k_action_samples
top_k_value_prev = top_k_value
batch_sample_mean = tf.reshape(
tf.tile(sample_mean, [1, num_samples]),
[self._dynamic_batch_size * num_samples, self.action_dim])
batch_sample_covariance_diag = tf.reshape(
tf.tile(sample_covariance_diag, [1, num_samples]),
[self._dynamic_batch_size * num_samples, self.action_dim])
action_samples = self._action_projection(
batch_sample_mean + batch_sample_covariance_diag * tf.cast(
random_sampler.sample(
sample_shape=[self._dynamic_batch_size * num_samples]),
dtype=tf.float32))
state_samples = tf.reshape(
tf.tile(self._state_tensor, [1, num_samples]),
[self._dynamic_batch_size * num_samples, self.state_dim])
action_samples = tf.reshape(
action_samples,
[self._dynamic_batch_size * num_samples, self.action_dim])
values = tf.reshape(
self._build_q_function_net(state_samples, action_samples),
[self._dynamic_batch_size, num_samples])
# everything is in batch mode
top_k_index = tf.argsort(
values, axis=1, direction="DESCENDING")[:, 0:top_k_num]
top_k_index_1d = tf.reshape(top_k_index,
[self._dynamic_batch_size * top_k_num, 1])
counter_tensor_1d = tf.reshape(
tf.tile(
tf.reshape(
tf.range(self._dynamic_batch_size),
[self._dynamic_batch_size, 1]), [1, top_k_num]),
[self._dynamic_batch_size * top_k_num, 1])
top_k_index_2d = tf.concat([counter_tensor_1d, top_k_index_1d], axis=1)
action_samples = tf.reshape(
action_samples,
[self._dynamic_batch_size, num_samples, self.action_dim])
top_k_action_samples = tf.gather_nd(action_samples, top_k_index_2d)
top_k_action_samples = tf.reshape(
top_k_action_samples,
[self._dynamic_batch_size, top_k_num, self.action_dim])
top_k_values = tf.gather_nd(values, top_k_index_2d)
top_k_values = tf.reshape(top_k_values,
[self._dynamic_batch_size, top_k_num])
# it's a batch_size x 1 tensor
top_k_value = tf.reshape(
tf.reduce_mean(top_k_values, axis=1), [self._dynamic_batch_size, 1])
sample_mean = tf.reduce_mean(top_k_action_samples, axis=1)
sample_covariance_diag = tf.math.reduce_variance(
top_k_action_samples, axis=1)
itr = itr + 1
cond_terminate = tf.less_equal(
tf.reduce_mean(tf.math.abs(top_k_value - top_k_value_prev)),
self._tolerance_tensor)
return itr, cond_terminate, sample_mean, sample_covariance_diag, \
top_k_value, top_k_action_samples
self.cost_optimizer = tf.while_loop(
cond_cross_entropy, body_cross_entropy, [
tf.constant(0),
tf.constant(False), sample_mean_init, sample_covariance_diag_init,
top_k_value_init, top_k_action_samples_init
])
def _create_gradient_ascent_action_tensors(self, eps=1e-6):
"""Create tensorflow operations for gradient ascent max_actions."""
self._action_init_tensor = tf.placeholder(
dtype=tf.float32,
name="action_init_tensor",
shape=(None, self.action_dim))
self._tolerance_tensor = tf.placeholder(
dtype=tf.float32, name="tolerance_tensor", shape=())
with tf.variable_scope("{}_{}".format(self.name, "action_variable")):
self._action_variable_tensor = tf.Variable(
initial_value=self._action_init_tensor,
trainable=True,
name="action_var")
    # gradient ascent
self.cost_now = -tf.reduce_mean(
self._build_q_function_net(self._state_tensor,
self._action_variable_tensor))
self.action_gradient = tf.gradients(self.cost_now,
self._action_variable_tensor)[0]
# normalize the gradient
self.normalized_action_gradient = self.action_gradient / (
eps + tf.linalg.norm(self.action_gradient))
if self.sufficient_ascent_flag:
def cond_sufficient_descent(learning_rate_action,
cond_sufficient_descent, cost_perturbed):
del cost_perturbed
cond_1 = tf.math.greater(learning_rate_action,
self.learning_rate_action)
return tf.math.logical_and(cond_1,
tf.logical_not(cond_sufficient_descent))
def body_sufficient_descent(learning_rate_action,
cond_sufficient_descent,
cost_perturbed,
c_armijo=0.01,
c_goldstein=0.25,
lr_decay=0.1):
"""Function for sufficient descent."""
del cond_sufficient_descent, cost_perturbed
action_variable_perturbed_tensor = self._action_variable_tensor - \
learning_rate_action * self.normalized_action_gradient
cost_perturbed = -tf.reduce_mean(
self._build_q_function_net(self._state_tensor,
action_variable_perturbed_tensor))
# Here the negative gradient corresponds to maximization of Q fun.
sufficient_descent = tf.reduce_sum(self.action_gradient *
-self.normalized_action_gradient)
goldstein_condition = tf.greater_equal(
cost_perturbed, self.cost_now +
c_goldstein * learning_rate_action * sufficient_descent)
armijo_condition = tf.less_equal(
cost_perturbed, self.cost_now +
c_armijo * learning_rate_action * sufficient_descent)
cond_sufficient_descent = tf.logical_and(goldstein_condition,
armijo_condition)
with tf.control_dependencies([cond_sufficient_descent]):
learning_rate_action = learning_rate_action * lr_decay
return learning_rate_action, cond_sufficient_descent, cost_perturbed
# Construct the while loop.
def cond_gradient_ascent(itr, cond_terminate):
cond_1 = tf.math.less(itr, self.action_maximization_iterations)
return tf.math.logical_and(cond_1, tf.logical_not(cond_terminate))
def body_gradient_ascent(itr, cond_terminate, lr_init=100.0):
"""Function for gradient descent."""
del cond_terminate
if self.sufficient_ascent_flag:
        # first calculate sufficient descent
result_sufficient_descent = tf.while_loop(
cond_sufficient_descent, body_sufficient_descent,
[tf.constant(lr_init),
tf.constant(False),
tf.constant(np.inf)])
lr_action = result_sufficient_descent[0]
cost_perturbed = result_sufficient_descent[2]
cond_terminate = tf.less_equal(
tf.math.abs(cost_perturbed - self.cost_now),
self._tolerance_tensor)
else:
# no sufficient descent step
lr_action = self.learning_rate_ga
action_variable_perturbed_tensor = self._action_variable_tensor - \
lr_action * self.normalized_action_gradient
cost_perturbed = -tf.reduce_mean(
self._build_q_function_net(self._state_tensor,
action_variable_perturbed_tensor))
cond_terminate = tf.less_equal(
tf.math.abs(cost_perturbed - self.cost_now),
self._tolerance_tensor)
train_op = tf.train.GradientDescentOptimizer(
learning_rate=lr_action).apply_gradients(
grads_and_vars=[(self.normalized_action_gradient,
self._action_variable_tensor)])
# Ensure that the update is applied before continuing.
with tf.control_dependencies([train_op]):
itr = itr + 1
return itr, cond_terminate
self.cost_optimizer = tf.while_loop(
cond_gradient_ascent, body_gradient_ascent,
[tf.constant(0), tf.constant(False)])
self.action_init_op = tf.initializers.variables(
tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope="{}_{}".format(self.name, "action_variable")))
def _create_dual_maxq_label_tensor(self, method="duality_based"):
"""Approximate the maxq label with dual."""
w_transpose_list = []
b_transpose_list = []
num_layers = 1
for itr, var in enumerate(self._vars_tf):
if itr % 2 == 0:
# even itr, multiplicative weights
if itr == 0:
wx_transpose = self._dummy_network_var_ph["{}_ph".format(
var.name)][:self.state_dim, :]
w_transpose_list.append(self._dummy_network_var_ph["{}_ph".format(
var.name)][self.state_dim:, :])
else:
w_transpose_list.append(self._dummy_network_var_ph["{}_ph".format(
var.name)])
num_layers += 1
else:
# odd itr, additive weights
if itr == 1:
b_transpose_list.append(
tf.tile(
tf.expand_dims(
self._dummy_network_var_ph["{}_ph".format(var.name)],
axis=0), [self.batch_size, 1]) +
tf.matmul(self._next_state_tensor, wx_transpose))
else:
b_transpose_list.append(
tf.tile(
tf.expand_dims(
self._dummy_network_var_ph["{}_ph".format(var.name)],
axis=0), [self.batch_size, 1]))
action_tensor_center = tf.zeros(shape=[self.batch_size, self.action_dim])
l_infty_norm_bound = np.max(self.action_max)
if method == "duality_based":
self.dual_maxq_tensor = dual_method.create_dual_approx(
num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
b_transpose_list, action_tensor_center)
elif method == "ibp":
# ibp dual solver
self.dual_maxq_tensor = dual_ibp_method.create_dual_ibp_approx(
num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
b_transpose_list, action_tensor_center)
else:
# mix method
dual_maxq_tensor = dual_method.create_dual_approx(
num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
b_transpose_list, action_tensor_center)
dual_ibp_maxq_tensor = dual_ibp_method.create_dual_ibp_approx(
num_layers, self.batch_size, l_infty_norm_bound, w_transpose_list,
b_transpose_list, action_tensor_center)
# minimum of the upper-bound
self.dual_maxq_tensor = tf.minimum(dual_maxq_tensor, dual_ibp_maxq_tensor)
def _create_dual_active_constraint_condition_tensor(self):
"""Create active constraint condition."""
# It's a 1D boolean tensor with length=batch_size
self.dual_active_constraint_condition_tensor = tf.reshape(
tf.math.greater(self._backup_tensor, self.q_function_network), [-1])
def _action_projection(self, action):
"""Action projection."""
if isinstance(action, np.ndarray):
return np.minimum(self.action_spec.maximum,
np.maximum(self.action_spec.minimum, action))
else:
# tf version
return tf.minimum(
self.action_spec.maximum,
tf.maximum(self.action_spec.minimum, tf.cast(action, tf.float32)))
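# Note: for np.ndarray inputs, the projection above is equivalent to
# np.clip(action, self.action_spec.minimum, self.action_spec.maximum).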
def _build_action_function_net(self, state):
"""Build action network."""
# define network
with tf.variable_scope(
"{}_{}".format(self.name, "action_function"),
reuse=tf.compat.v1.AUTO_REUSE):
net = tf.layers.flatten(state, name="flatten_0")
for i, hidden_units in enumerate(self.hidden_layers):
net = tf.layers.dense(net, hidden_units, name="dense_%d" % i)
net = tf.layers.batch_normalization(net)
net = tf.nn.relu(net)
net = tf.layers.dense(net, self.action_dim, name="action_output")
# make sure actions are bounded
net = self._action_projection(net)
return net
def _build_q_function_net(self, state, action):
"""Build q_function network."""
# define network
with tf.variable_scope(
"{}_{}".format(self.name, "q_function"), reuse=tf.compat.v1.AUTO_REUSE):
net = tf.layers.flatten(state, name="q_flatten_0")
net = tf.concat([net, action], axis=-1)
for i, hidden_units in enumerate(self.hidden_layers):
net = tf.layers.dense(
net, hidden_units, activation=tf.nn.relu, name="q_dense_%d" % i)
net = tf.layers.dense(net, 1, name="q_output")
return net
def _build_lambda_function_net(self, state, action):
"""Build lambda_function network."""
# define network
with tf.variable_scope(
"{}_{}".format(self.name, "lambda_function"),
reuse=tf.compat.v1.AUTO_REUSE):
net = tf.layers.flatten(state, name="lambda_flatten_0")
net = tf.concat([net, action], axis=-1)
for i, hidden_units in enumerate(self.hidden_layers):
net = tf.layers.dense(
net,
hidden_units,
activation=tf.nn.relu,
name="lambda_dense_%d" % i)
net = tf.layers.dense(net, 1, name="lambda_output")
net = tf.minimum(
self.lambda_max,
tf.maximum(0.0, tf.cast(net, tf.float32)),
name="lambda_proj")
return net
def predict_action_function(self, state):
"""Predict action function.
Predict the best action for the given state using action function.
Args:
state: np.ndarray for state.
Returns:
Tensor for the predicted best action for the given `state`.
"""
state_tensor = np.reshape(state, [-1, self.state_dim])
return self._session.run(
self.action_function_network,
feed_dict={
self._state_tensor: state_tensor,
})
def predict_q_function(self, state, action):
"""Predict Q function.
Args:
state: np.ndarray for state.
action: np.ndarray for action.
Returns:
Tensor for the predicted Q value for the given `state` and `action` pair.
"""
state_tensor = np.reshape(state, [-1, self.state_dim])
action_tensor = np.reshape(action, [-1, self.action_dim])
return self._session.run(
self.q_function_network,
feed_dict={
self._state_tensor: state_tensor,
self._action_tensor: action_tensor
})
def predict_state_perturbed_q_function(self, centroid_states,
centroid_actions, state_deviation):
"""Predict state perturbed Q function.
Args:
centroid_states: np.ndarray for centroid states.
centroid_actions: np.ndarray for the actions of the centroid states.
state_deviation: np.ndarray for the vector distance between non-centroid
states and their centroids.
Returns:
Tensor for the predicted Q values for the non-centroid states.
"""
centroid_states = np.reshape(centroid_states, [-1, self.state_dim])
centroid_actions = np.reshape(centroid_actions, [-1, self.action_dim])
state_deviation = np.reshape(state_deviation, [-1, self.state_dim])
return self._session.run(
self.state_perturbed_q_function_network,
feed_dict={
self._state_tensor: centroid_states,
self._action_tensor: centroid_actions,
self._state_deviation_tensor: state_deviation
})
def predict_lambda_function(self, state, action):
"""Predict lambda function.
Args:
state: np.ndarray for state.
action: np.ndarray for action.
Returns:
Tensor for the predicted lambda for the given `state` and `action` pair.
"""
state_tensor = np.reshape(state, [-1, self.state_dim])
action_tensor = np.reshape(action, [-1, self.action_dim])
return self._session.run(
self.lambda_function_network,
feed_dict={
self._state_tensor: state_tensor,
self._action_tensor: action_tensor
})
def compute_backup(self, maxq_labels, rewards, dones, discount_factor):
"""Compute Bellman backup.
Args:
maxq_labels: np.ndarray for max-Q labels.
rewards: np.ndarray for immediate rewards.
dones: np.ndarray for done flags. True if a state is a terminating state,
False otherwise.
discount_factor: float. Discount factor gamma.
Returns:
Tensor for TD targets.
"""
maxq_label = np.reshape(maxq_labels, [-1, 1])
reward_tensor = np.reshape(rewards, [-1, 1])
done_tensor = np.reshape(dones, [-1, 1])
feed = {
self._maxq_label: maxq_label,
self._reward_tensor: reward_tensor,
self._done_tensor: done_tensor,
self._discount_factor: discount_factor
}
return self._session.run(self._backup_tensor, feed_dict=feed)
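# _backup_tensor is constructed elsewhere; the standard TD target it is
# expected to compute is r + discount_factor * (1 - done) * max_a' Q(s', a'),
# with the max-Q term supplied through maxq_labels.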
def compute_td_rmse(self, states, actions, maxq_labels, rewards, dones,
discount_factor):
"""Compute TD rmse.
Args:
states: np.ndarray for states.
actions: np.ndarray for actions.
maxq_labels: np.ndarray for max-Q labels.
rewards: np.ndarray for immediate rewards.
dones: np.ndarray for done flags. True if a state is a terminating state,
False otherwise.
discount_factor: float. Discount factor gamma.
Returns:
Tensor for TD RMSE.
"""
state_tensor = np.reshape(states, [-1, self.state_spec.shape[0]])
action_tensor = np.reshape(actions, [-1, self.action_spec.shape[0]])
maxq_label = np.reshape(maxq_labels, [-1, 1])
reward_tensor = np.reshape(rewards, [-1, 1])
done_tensor = np.reshape(dones, [-1, 1])
feed = {
self._state_tensor: state_tensor,
self._action_tensor: action_tensor,
self._maxq_label: maxq_label,
self._reward_tensor: reward_tensor,
self._done_tensor: done_tensor,
self._discount_factor: discount_factor
}
return self._session.run(self._td_rmse, feed_dict=feed)
def compute_dual_active_constraint_condition(self, states, actions,
dual_maxq_labels, rewards, dones,
discount_factor):
"""Compute dual active constraint condition.
Args:
states: np.ndarray for states.
actions: np.ndarray for actions.
dual_maxq_labels: np.ndarray for max-Q labels computed by dual method.
rewards: np.ndarray for immediate rewards.
dones: np.ndarray for done flags. True if a state is a terminating state,
False otherwise.
discount_factor: float. Discount factor gamma.
Returns:
Tensor for bool flags. True if a TD target is larger than a predicted
Q value for a pair of state and action.
"""
state_tensor = np.reshape(states, [-1, self.state_dim])
action_tensor = np.reshape(actions, [-1, self.action_dim])
dual_maxq_label = np.reshape(dual_maxq_labels, [-1, 1])
reward_tensor = np.reshape(rewards, [-1, 1])
done_tensor = np.reshape(dones, [-1, 1])
feed = {
self._state_tensor: state_tensor,
self._action_tensor: action_tensor,
self._maxq_label: dual_maxq_label,
self._reward_tensor: reward_tensor,
self._done_tensor: done_tensor,
self._discount_factor: discount_factor
}
return self._session.run(
self.dual_active_constraint_condition_tensor, feed_dict=feed)
def compute_best_actions(self, states, tolerance, warmstart=True,
tf_summary_vals=None):
"""Compute best actions.
Args:
states: np.ndarray for states.
tolerance: float. Optimizer tolerance. This is used as a stopping
condition for the optimizer.
warmstart: bool. If True, initialize the maximization from the action
function's predicted action; otherwise start from random actions.
tf_summary_vals: list to store tf.Summary.Value objects.
Returns:
Tensor for the best actions for the given `states`.
"""
state_tensor = np.reshape(states, [-1, self.state_dim])
assert len(state_tensor) > 0
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(tag="tolerance", simple_value=tolerance))
# profiling the batch action maximization.
ts_begin = time.time()
if self.solver == "gradient_ascent":
if not hasattr(self, "_action_init_tensor"):
print("Create action variables for gradient ascent.")
self._create_gradient_ascent_action_tensors()
best_actions = self.gradient_ascent_best_actions(state_tensor, tolerance,
warmstart,
tf_summary_vals)
elif self.solver == "cross_entropy":
if not hasattr(self, "_action_init_tensor"):
print("Create action variables for cross entropy.")
self._create_cross_entropy_action_tensors()
best_actions = self.cross_entropy_best_actions(state_tensor, tolerance,
warmstart, tf_summary_vals)
elif self.solver == "ails" or self.solver == "mip":
raise ValueError("AILS and MIP solvers are not supported yet.")
else:
raise ValueError("Solver is not implemented!")
elapsed_in_msecs = int((time.time() - ts_begin) * 1000)
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(
tag="batch_maxq/elapsed_msec", simple_value=elapsed_in_msecs))
return best_actions
def cross_entropy_best_actions(self, state_tensor, tolerance_tensor,
warmstart, tf_summary_vals=None):
"""Get best action with cross entropy for train network."""
dynamic_batch_size = len(state_tensor)
if warmstart:
action_init_tensor = self.predict_action_function(state_tensor)
else:
# randomly sample actions
action_init_tensor = self.action_min + np.random.rand(
dynamic_batch_size, self.action_dim) * (
self.action_max - self.action_min)
feed = {
self._state_tensor: state_tensor,
self._tolerance_tensor: tolerance_tensor,
self._action_init_tensor: action_init_tensor,
self._dynamic_batch_size: dynamic_batch_size
}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
# 1) maximize actions through cross entropy
result = self._session.run(self.cost_optimizer, feed_dict=feed)
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(tag="batch_maxq/iterations", simple_value=result[0]))
# itr, cond_terminate, sample_mean, sample_covariance_diag,
# top_k_value, top_k_actions
top_k_actions = result[-1]
return top_k_actions[:, 0, :]
def gradient_ascent_best_actions(self, state_tensor, tolerance_tensor,
warmstart, tf_summary_vals=None):
"""Get best action with gradient ascent for train network."""
dynamic_batch_size = len(state_tensor)
if warmstart:
action_init_tensor = self.predict_action_function(state_tensor)
else:
# randomly sample actions
action_init_tensor = self.action_min + np.random.rand(
dynamic_batch_size, self.action_dim) * (
self.action_max - self.action_min)
# 1) initialize tensors in feed_dict
feed = {
self._state_tensor: state_tensor,
self._tolerance_tensor: tolerance_tensor,
self._action_init_tensor: action_init_tensor
}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
# 2) initialize action variable in dummy q_network
self._session.run(self.action_init_op, feed_dict=feed)
# 3) maximize actions through gradient ascent
result = self._session.run(self.cost_optimizer, feed_dict=feed)
if tf_summary_vals is not None:
tf_summary_vals.append(
tf.Summary.Value(tag="batch_maxq/iterations", simple_value=result[0]))
# 4) get max action solutions
return self._action_projection(
self._session.run(self._action_variable_tensor))
def compute_dual_maxq_label(self, next_states):
"""Compute max Q label via the dual method.
Args:
next_states: np.ndarray for states.
Returns:
Tensor for the max-Q labels for the given `next_states`, computed by the
dual method.
"""
feed = {self._next_state_tensor: next_states}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
return self._session.run(self.dual_maxq_tensor, feed_dict=feed)
def batch_train_action_function(self, state_tensor_stack, best_q_stack):
"""Train action function.
Args:
state_tensor_stack: np.ndarray for states.
best_q_stack: np.ndarray for the max-Q labels.
Returns:
The evaluated action function loss for the batch.
"""
feed = {
self._state_tensor: state_tensor_stack,
self._best_q_label: best_q_stack,
}
vars_vals = self._session.run(self._vars_tf)
for var, val in zip(self._vars_tf, vars_vals):
feed[self._dummy_network_var_ph["{}_ph".format(var.name)]] = val
action_function_loss, _ = self._session.run(
[self._action_function_loss, self._action_function_optimizer],
feed_dict=feed)
return action_function_loss
def batch_train_q_function(self, state_tensor_stack, action_tensor_stack,
true_label_stack):
"""Train Q function function.
Args:
state_tensor_stack: np.ndarray for states.
action_tensor_stack: np.ndarray for actions.
true_label_stack: np.ndarray for the TD targets.
Returns:
The evaluated Q function loss for the batch.
"""
feed = {
self._state_tensor: state_tensor_stack,
self._action_tensor: action_tensor_stack,
self._true_label: true_label_stack,
}
q_function_loss, _ = self._session.run(
[self._q_function_loss, self._q_function_optimizer], feed_dict=feed)
return q_function_loss
def batch_train_lambda_function(self, state_tensor_stack, action_tensor_stack,
true_label_stack):
"""Train lambda function.
Args:
state_tensor_stack: np.ndarray for states.
action_tensor_stack: np.ndarray for actions.
true_label_stack: np.ndarray for the TD targets.
Returns:
The evaluated lambda function loss for the batch.
"""
feed = {
self._state_tensor: state_tensor_stack,
self._action_tensor: action_tensor_stack,
self._true_label: true_label_stack,
}
lambda_function_loss, _ = self._session.run(
[self._lambda_function_loss, self._lambda_function_optimizer],
feed_dict=feed)
return lambda_function_loss
|
|
from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# XXX: ugly, we use a class to avoid calling some expensive functions twice
# when generating config.h/numpyconfig.h. I don't see a better way because
# distutils forces config.h generation inside an Extension class, and as such
# sharing configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in Python 3 and the difference
# in time only affects the build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
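# Usage sketch (mirrors generate_config_h/generate_numpyconfig_h below): the
# first call runs the real check and caches a pickled copy; subsequent calls
# return a deep copy of the unpickled result.
#
#   cocache = CallOnceOnly()
#   moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)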
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Perhaps a fancier check is in order here, so that threads are only enabled
# if there are actually multiple CPUs? But threaded code can be nice even on
# a single CPU, so that long-running calculations don't block.
return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct.
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except Exception:
# os.uname is not available on all platforms; catching broadly is ugly but safe.
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python puts its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own tests are correct.
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macros (C99), but some platforms only have the
# function, or both the function and the macro version. Check for the macro
# only, and define replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers = ["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [16, 12, 8]
expected['Py_intptr_t'] = [8, 4]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [8, 4]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "\
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers = ["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def,
expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"\
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
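# Example override (a sketch): running e.g. `MATHLIB=m python setup.py build`
# (or a comma-separated list such as MATHLIB=m,cpml) makes that library list
# the first candidate tried above.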
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info, default_lib_dirs
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform=='win32' or os.name=='nt':
win32_checks(moredefs)
# C99 restrict keyword
moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
# Ugly: this can be called within a library and not an extension,
# in which case there is no libraries attributes (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
# allows using code generation in headers headers
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
# Don't install fenv unless we need them.
if sys.platform == 'cygwin':
config.add_data_dir('include/numpy/fenv')
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
# (don't ask). Because clib are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources = [join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during the npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src'),
join(local_dir, 'src', 'private', 'templ_common.h.src')]
# numpy.distutils generates .c from .c.src in weird directories; we have
# to add them here as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
# add library sources as distutils does not consider library
# dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
join('src', 'private', 'templ_common.h.src'),
]
blas_info = get_info('blas_opt', 0)
if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
join('src', 'multiarray', 'python_xerbla.c'),
])
else:
extra_info = {}
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
config.add_extension('multiarray',
sources=multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends=deps + multiarray_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'scalarmath.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
# numpy.distutils generates .c from .c.src in weird directories; we have
# to add them here as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends = deps + umath_deps,
libraries = ['npymath'],
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources = [join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources = [join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__=='__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import mock
import boto
from boto.pyami.config import Config
from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints
from boto.regioninfo import load_regions, get_regions, connect
from tests.unit import unittest
class TestRegionInfo(object):
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
self.connection = connection
self.name = name
self.endpoint = endpoint
self.connection_cls = connection_cls
def connect(self, **kwargs):
return self.connection_cls(region=self)
class FakeConn(object):
def __init__(self, region, **kwargs):
self.region = region
self.kwargs = kwargs
class TestEndpointLoading(unittest.TestCase):
def setUp(self):
super(TestEndpointLoading, self).setUp()
def test_load_endpoint_json(self):
endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
self.assertTrue('partitions' in endpoints)
def test_merge_endpoints(self):
defaults = {
'ec2': {
'us-east-1': 'ec2.us-east-1.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
}
}
additions = {
# Top-level addition.
's3': {
'us-east-1': 's3.amazonaws.com'
},
'ec2': {
# Overwrite. This doesn't exist, just test data.
'us-east-1': 'ec2.auto-resolve.amazonaws.com',
# Deep addition.
'us-west-2': 'ec2.us-west-2.amazonaws.com',
}
}
endpoints = merge_endpoints(defaults, additions)
self.assertEqual(endpoints, {
'ec2': {
'us-east-1': 'ec2.auto-resolve.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
'us-west-2': 'ec2.us-west-2.amazonaws.com',
},
's3': {
'us-east-1': 's3.amazonaws.com'
}
})
def test_load_regions(self):
# Just the defaults.
endpoints = load_regions()
self.assertTrue('us-east-1' in endpoints['ec2'])
self.assertFalse('test-1' in endpoints['ec2'])
# With ENV overrides.
os.environ['BOTO_ENDPOINTS'] = os.path.join(
os.path.dirname(__file__),
'test_endpoints.json'
)
self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS')
endpoints = load_regions()
self.assertTrue('us-east-1' in endpoints['ec2'])
self.assertTrue('test-1' in endpoints['ec2'])
self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com')
def test_get_regions(self):
# With defaults.
ec2_regions = get_regions('ec2')
self.assertTrue(len(ec2_regions) >= 10)
west_2 = None
for region_info in ec2_regions:
if region_info.name == 'us-west-2':
west_2 = region_info
break
self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
self.assertTrue(isinstance(west_2, RegionInfo))
self.assertEqual(west_2.name, 'us-west-2')
self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
self.assertEqual(west_2.connection_cls, None)
def test_get_regions_overrides(self):
ec2_regions = get_regions(
'ec2',
region_cls=TestRegionInfo,
connection_cls=FakeConn
)
self.assertTrue(len(ec2_regions) >= 10)
west_2 = None
for region_info in ec2_regions:
if region_info.name == 'us-west-2':
west_2 = region_info
break
self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
self.assertFalse(isinstance(west_2, RegionInfo))
self.assertTrue(isinstance(west_2, TestRegionInfo))
self.assertEqual(west_2.name, 'us-west-2')
self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
self.assertEqual(west_2.connection_cls, FakeConn)
class TestConnectToRegion(unittest.TestCase):
def test_connect(self):
connection = connect(
'ec2', 'us-west-2', connection_cls=FakeConn)
self.assertEqual(connection.region.name, 'us-west-2')
expected_endpoint = 'ec2.us-west-2.amazonaws.com'
self.assertEqual(connection.region.endpoint, expected_endpoint)
def test_does_not_use_heuristics_by_default(self):
connection = connect(
'ec2', 'us-southeast-43', connection_cls=FakeConn)
self.assertIsNone(connection)
def test_uses_region_override(self):
connection = connect(
'ec2', 'us-west-2', connection_cls=FakeConn,
region_cls=TestRegionInfo
)
self.assertIsInstance(connection.region, TestRegionInfo)
self.assertEqual(connection.region.name, 'us-west-2')
expected_endpoint = 'ec2.us-west-2.amazonaws.com'
self.assertEqual(connection.region.endpoint, expected_endpoint)
def test_use_heuristics_via_env_var(self):
# With ENV overrides.
os.environ['BOTO_USE_ENDPOINT_HEURISTICS'] = 'True'
self.addCleanup(os.environ.pop, 'BOTO_USE_ENDPOINT_HEURISTICS')
connection = connect(
'ec2', 'us-southeast-43', connection_cls=FakeConn,
region_cls=TestRegionInfo)
self.assertIsNotNone(connection)
self.assertEqual(connection.region.name, 'us-southeast-43')
expected_endpoint = 'ec2.us-southeast-43.amazonaws.com'
self.assertEqual(connection.region.endpoint, expected_endpoint)
def test_use_heuristics_via_config(self):
config = mock.Mock(spec=Config)
def _getbool(section, name, default=False):
if section == 'Boto' and name == 'use_endpoint_heuristics':
return True
return default
config.getbool = _getbool
config.get.return_value = None
with mock.patch('boto.config', config):
connection = connect(
'ec2', 'us-southeast-43', connection_cls=FakeConn,
region_cls=TestRegionInfo)
self.assertIsNotNone(connection)
self.assertEqual(connection.region.name, 'us-southeast-43')
expected_endpoint = 'ec2.us-southeast-43.amazonaws.com'
self.assertEqual(connection.region.endpoint, expected_endpoint)
def test_connect_with_heuristics_without_explicit_regioninfo(self):
os.environ['BOTO_USE_ENDPOINT_HEURISTICS'] = 'True'
self.addCleanup(os.environ.pop, 'BOTO_USE_ENDPOINT_HEURISTICS')
connection = connect(
'ec2', 'us-southeast-43', connection_cls=FakeConn)
self.assertIsNotNone(connection)
self.assertIsInstance(connection.region, RegionInfo)
self.assertEqual(connection.region.name, 'us-southeast-43')
expected_endpoint = 'ec2.us-southeast-43.amazonaws.com'
self.assertEqual(connection.region.endpoint, expected_endpoint)
if __name__ == '__main__':
unittest.main()
|
|
"""Parse Python source and extract unresolved symbols."""
import ast
import sys
from contextlib import contextmanager
from itertools import chain
from importmagic.six import string_types
from importmagic.util import parse_ast
try:
import builtins as __builtin__
except ImportError:
import __builtin__
class _InvalidSymbol(Exception):
pass
class Scope(object):
GLOBALS = ['__name__', '__file__', '__loader__', '__package__', '__path__']
PYTHON3_BUILTINS = ['PermissionError']
ALL_BUILTINS = set(dir(__builtin__)) | set(GLOBALS) | set(PYTHON3_BUILTINS)
def __init__(self, parent=None, define_builtins=True, is_class=False):
self._parent = parent
self._definitions = set()
self._references = set()
self._children = []
self._cursors = [self]
self._cursor = self
self._define_builtins = define_builtins
self._is_class = is_class
if define_builtins:
self._define_builtin_symbols()
self._add_symbol = []
self._symbol = []
@contextmanager
def start_symbol(self):
self._add_symbol.append(self._add_symbol[-1] if self._add_symbol else self.reference)
try:
yield self
finally:
self.flush_symbol()
@contextmanager
def start_definition(self):
self._add_symbol.append(self.define)
try:
yield self
finally:
self.flush_symbol()
@contextmanager
def start_reference(self):
self._add_symbol.append(self.reference)
try:
yield self
finally:
self.flush_symbol()
def extend_symbol(self, segment, extend_only=False):
if extend_only and not self._symbol:
return
self._symbol.append(segment)
def end_symbol(self):
if self._symbol:
add = self._add_symbol[-1] if self._add_symbol else self.reference
add('.'.join(self._symbol))
self._symbol = []
def flush_symbol(self):
self.end_symbol()
if self._add_symbol:
self._add_symbol.pop()
@classmethod
def from_source(cls, src, trace=False, define_builtins=True):
scope = Scope(define_builtins=define_builtins)
visitor = UnknownSymbolVisitor(scope, trace=trace)
if isinstance(src, string_types):
src = parse_ast(src)
visitor.visit(src)
scope.flush_symbol()
return scope
def _define_builtin_symbols(self):
self._cursor._definitions.update(Scope.ALL_BUILTINS)
def define(self, name):
if '.' in name:
self.reference(name)
else:
self._cursor._definitions.add(name)
def reference(self, name):
self._cursor._references.add(name)
@contextmanager
def enter(self, is_class=False):
child = Scope(self._cursor, is_class=is_class, define_builtins=self._define_builtins)
self._cursor._children.append(child)
self._cursors.append(child)
self._cursor = child
try:
yield child
finally:
child.end_symbol()
self._cursors.pop()
self._cursor = self._cursors[-1]
def find_unresolved_and_unreferenced_symbols(self):
"""Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced})
"""
unresolved = set()
unreferenced = self._definitions.copy()
self._collect_unresolved_and_unreferenced(set(), set(), unresolved, unreferenced,
frozenset(self._definitions), start=True)
return unresolved, unreferenced - Scope.ALL_BUILTINS
def _collect_unresolved_and_unreferenced(self, definitions, definitions_excluding_top,
unresolved, unreferenced, top, start=False):
scope_definitions = definitions | self._definitions
scope_definitions_excluding_top = definitions_excluding_top | (set() if start else self._definitions)
# When we're in a class, don't export definitions to descendant scopes
if not self._is_class:
definitions = scope_definitions
definitions_excluding_top = scope_definitions_excluding_top
for reference in self._references:
symbols = set(_symbol_series(reference))
# Symbol has no definition anywhere in ancestor scopes.
if symbols.isdisjoint(scope_definitions):
unresolved.add(reference)
# Symbol is referenced only in the top level scope.
elif not symbols.isdisjoint(top) and symbols.isdisjoint(scope_definitions_excluding_top):
unreferenced -= symbols
# Recurse
for child in self._children:
child._collect_unresolved_and_unreferenced(
definitions, definitions_excluding_top, unresolved, unreferenced, top,
)
def __repr__(self):
return 'Scope(definitions=%r, references=%r, children=%r)' \
% (self._definitions - Scope.ALL_BUILTINS, self._references, self._children)
def _symbol_series(s):
tokens = s.split('.')
return ['.'.join(tokens[:n + 1]) for n in range(len(tokens))]
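# For illustration (not part of the original module), _symbol_series expands a
# dotted name into its prefixes:
#   _symbol_series('os.path.join') -> ['os', 'os.path', 'os.path.join']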
class UnknownSymbolVisitor(ast.NodeVisitor):
def __init__(self, scope=None, trace=False):
super(UnknownSymbolVisitor, self).__init__()
self._scope = scope or Scope()
self._trace = trace
def visit(self, node):
if node is None:
return
elif isinstance(node, list):
for subnode in node:
self.visit(subnode)
return
if self._trace:
print(node, vars(node))
method = getattr(self, 'visit_%s' % node.__class__.__name__, None)
if method is not None:
try:
method(node)
except Exception:
# print >> sys.stderr, node, vars(node)
raise
else:
self.generic_visit(node)
self._scope.end_symbol()
def visit_Raise(self, node):
if hasattr(node, 'type'): # Python 2: raise A[, B[, C]]
with self._scope.start_reference():
self.visit(node.type)
with self._scope.start_reference():
self.visit(node.inst)
with self._scope.start_reference():
self.visit(node.tback)
else: # Python 3: raise A[ from B]
with self._scope.start_reference():
self.visit(node.exc)
with self._scope.start_reference():
self.visit(node.cause)
def visit_TryExcept(self, node):
for sub in node.body:
with self._scope.start_reference():
self.visit(sub)
self.visit(node.handlers)
for n in node.orelse:
with self._scope.start_reference():
self.visit(n)
def visit_ExceptHandler(self, node):
with self._scope.start_reference():
self.visit(node.type)
with self._scope.start_definition():
if isinstance(node.name, str):
# Python 3
self._scope.extend_symbol(node.name)
else:
self.visit(node.name)
for n in node.body:
with self._scope.start_reference():
self.visit(n)
def visit_Return(self, node):
with self._scope.start_reference():
self.visit(node.value)
def visit_If(self, node):
with self._scope.start_reference():
self.visit(node.test)
for child in node.body:
with self._scope.start_reference():
self.visit(child)
for child in node.orelse:
with self._scope.start_reference():
self.visit(child)
def visit_While(self, node):
return self.visit_If(node)
def visit_FunctionDef(self, node):
self._scope.define(node.name)
self.visit_Lambda(node)
def visit_Lambda(self, node):
for decorator in getattr(node, 'decorator_list', []):
with self._scope.start_reference() as scope:
self.visit(decorator)
with self._scope.enter() as scope:
with scope.start_definition():
args = node.args
for arg in [args.kwarg, args.vararg]:
if arg:
# arg is either an "arg" object (Python 3.4+) or a str
scope.define(arg.arg if hasattr(arg, 'arg') else arg)
# kwonlyargs was added in Python 3
for arg in args.args + getattr(args, 'kwonlyargs', []):
scope.define(arg.id if hasattr(arg, 'id') else arg.arg)
for default in args.defaults:
self.visit(default)
body = [node.body] if isinstance(node, ast.Lambda) else node.body
with scope.start_reference():
for statement in body:
self.visit(statement)
def visit_ListComp(self, node):
return self.visit_GeneratorExp(node)
def visit_Print(self, node):
for value in node.values:
with self._scope.start_reference():
self.visit(value)
if node.dest:
with self._scope.start_reference():
self.visit(node.dest)
def visit_GeneratorExp(self, node):
with self._scope.start_reference():
self.visit(node.elt)
for elt in node.generators:
self.visit(elt)
def visit_comprehension(self, node):
with self._scope.start_definition():
self.visit(node.target)
with self._scope.start_reference():
self.visit(node.iter)
for elt in node.ifs:
with self._scope.start_reference():
self.visit(elt)
def visit_Assign(self, node):
for target in node.targets:
with self._scope.start_definition():
self.visit(target)
with self._scope.start_reference():
self.visit(node.value)
def visit_ClassDef(self, node):
for decorator in getattr(node, 'decorator_list', []):
with self._scope.start_reference():
self.visit(decorator)
self._scope.define(node.name)
for base in node.bases:
with self._scope.start_reference():
self.visit(base)
with self._scope.enter(is_class=True):
for body in node.body:
with self._scope.start_reference():
self.visit(body)
def visit_ImportFrom(self, node):
for name in node.names:
if name.name == '*':
# TODO: Do something?
continue
symbol = name.asname or name.name.split('.')[0]
self._scope.define(symbol)
# Explicitly add a reference for __future__ imports so they don't
# get pruned.
if node.module == '__future__':
self._scope.reference(symbol)
self.generic_visit(node)
def visit_Import(self, node):
for name in node.names:
self._scope.define(name.asname or name.name)
self.generic_visit(node)
def visit_With(self, node):
if hasattr(node, 'items'):
for item in node.items:
self._visit_withitem(item)
else:
self._visit_withitem(node)
with self._scope.start_reference():
self.visit(node.body)
def _visit_withitem(self, node):
if node.optional_vars:
with self._scope.start_definition():
self.visit(node.optional_vars)
with self._scope.start_reference():
self.visit(node.context_expr)
def visit_For(self, node):
with self._scope.start_definition():
self.visit(node.target)
with self._scope.start_reference():
self.visit(node.iter)
with self._scope.start_reference():
self.visit(node.body)
with self._scope.start_reference():
self.visit(node.orelse)
def visit_Attribute(self, node, chain=False):
if isinstance(node.value, ast.Name):
self._scope.extend_symbol(node.value.id)
self._scope.extend_symbol(node.attr)
if not chain:
self._scope.end_symbol()
elif isinstance(node.value, ast.Attribute):
self.visit_Attribute(node.value, chain=True)
self._scope.extend_symbol(node.attr, extend_only=True)
else:
self._scope.end_symbol()
self.visit(node.value)
self._scope.end_symbol()
def visit_Subscript(self, node):
self._scope.end_symbol()
with self._scope.start_reference():
self.visit(node.value)
self.visit(node.slice)
def visit_Call(self, node):
with self._scope.start_reference():
self.visit(node.func)
# Python 3.5 AST removed starargs and kwargs
additional = []
if getattr(node, 'starargs', None):
additional.append(node.starargs)
if getattr(node, 'kwargs', None):
additional.append(node.kwargs)
for arg in chain(node.args, node.keywords, additional):
with self._scope.start_reference():
self.visit(arg)
def visit_Name(self, node):
self._scope.extend_symbol(node.id)
self._scope.end_symbol()
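# A minimal usage sketch (not part of the original module): build a Scope from
# a source string and inspect the result. The example source is hypothetical.
def _example_scope_usage():
    src = 'import os\nx = sys.path\n'
    scope = Scope.from_source(src)
    unresolved, unreferenced = scope.find_unresolved_and_unreferenced_symbols()
    # 'sys.path' is never defined, so it is reported as unresolved; 'os' and
    # 'x' are defined but never referenced.
    return unresolved, unreferenced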
if __name__ == '__main__':
with open(sys.argv[1]) as fd:
scope = Scope.from_source(fd.read())
unresolved, unreferenced = scope.find_unresolved_and_unreferenced_symbols()
from pprint import pprint
pprint(unresolved)
pprint(unreferenced)
|
|
from nanpy.memo import memoized
from nanpy.pwm import ArduinoPwmPin
import sys
LOW, HIGH = 0, 1
INPUT, OUTPUT = 0, 1
# from six
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
else:
string_types = basestring,
class PinError(Exception):
pass
def to_pin_number(pin_name, A0=None):
try:
if isinstance(pin_name, string_types):
if pin_name[0] == 'D':
nr = int(pin_name[1:])
if A0 is not None:
if nr >= A0:
raise ValueError('invalid pin id:%r' % pin_name)
elif pin_name[0] == 'A':
if A0 is None:
raise ValueError('A0 is None, but analog pin was set! (%s)' % pin_name)
nr = int(pin_name[1:]) + A0
else:
nr = int(pin_name)
elif hasattr(pin_name, 'pin_number'):
nr=pin_name.pin_number
else:
nr = int(pin_name)
except IndexError:
raise ValueError('invalid pin id:%r' % pin_name)
return nr
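# A short illustration (not part of the original module); the A0 == 14 mapping
# below is an assumption (e.g. an Arduino Uno style layout).
def _example_to_pin_number():
    assert to_pin_number('D3') == 3
    assert to_pin_number('A0', A0=14) == 14
    assert to_pin_number(5) == 5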
class ArduinoPin(object):
'''Object-oriented representation of an Arduino pin
Examples:
# they do the same
a=ArduinoTree()
a.pin.get(0).value
a.pin.get('D0').value
a.pin.get('D0').read_value()
'''
def __init__(self, name, total_pin_count, define, register, core, ram, api):
"""name can be int or string."""
self.register = register
self.core = core
self.ram = ram
self.api = api
self.define = define
self.A0 = define.get('A0')
self.pin_number = to_pin_number(name, self.A0)
if self.pin_number >= total_pin_count:
raise ValueError('pin %s (Nr:%s) not in range' %
(name, self.pin_number))
@property
@memoized
def pwm(self):
'''Object-oriented representation of the pin PWM functionality
'''
return (
ArduinoPwmPin(
self.pin_number,
self.define,
self.register,
self.core,
self.api)
)
@property
def is_digital(self):
return self.pin_number < self.A0
@property
def is_analog(self):
return not self.is_digital
@property
def avr_port(self):
'''AVR port name (example: "B")
AVR only.
'''
x = self.core.digitalPinToPort(self.pin_number)
return chr(ord('A') + x - 1)
@property
def avr_bit(self):
'''AVR bit name (example: "2")
AVR only.
'''
bitmask = self.core.digitalPinToBitMask(self.pin_number)
i = 0
while bitmask != 1:
bitmask >>= 1
i += 1
return i
@property
def avr_pin(self):
'''AVR pin name (example: "PB2")
'''
return 'P%s%s' % (self.avr_port, self.avr_bit)
@property
def name(self):
"""Arduino pin name.
D -> digital
A -> analog
(examples: "D2", "A0")
"""
if self.is_digital:
return 'D%s' % self.pin_number
else:
return 'A%s' % self.pin_number_analog
@property
def pin_number_analog(self):
x = self.pin_number - self.A0
if x >= 0:
return x
@property
def programming_function(self):
"""programming function (MISO, MOSI, SCK or SS)"""
if self.pin_number == self.define.get('MISO'):
return 'MISO'
if self.pin_number == self.define.get('MOSI'):
return 'MOSI'
if self.pin_number == self.define.get('SCK'):
return 'SCK'
if self.pin_number == self.define.get('SS'):
return 'SS'
def reset(self):
'''reset to the pin default state: INPUT, no pullup
'''
self.write_mode(INPUT)
self.write_pullup(LOW)
if self.pwm.available:
self.pwm.reset()
def write_pullup(self, value):
"""set pullup (0/1)"""
self.write_mode(INPUT)
return self.write_digital_value(value)
def read_digital_value(self, direction=None):
"""read digital value (0/1)
direction can be set, if 'direction' parameter exists
"""
if direction is not None:
self.write_mode(direction)
return self.api.digitalRead(self.pin_number)
def write_digital_value(self, value, direction=None):
"""write digital value (0/1)
direction can be set, if 'direction' parameter exists, and it is
not INPUT
"""
if direction == INPUT:
raise ValueError('write_digital_value() to INPUT??')
if direction is not None:
self.write_mode(direction)
value = 1 if value else 0
return self.api.digitalWrite(self.pin_number, value)
digital_value = property(read_digital_value, write_digital_value)
def read_analog_value(self):
'''read analog value (0-1023)
'''
if not self.is_analog:
return None
return self.api.analogRead(self.pin_number)
analog_value = property(read_analog_value)
def read_mode(self):
"""read mode (0/1)"""
bitmask = self.core.digitalPinToBitMask(self.pin_number)
port = self.core.digitalPinToPort(self.pin_number)
reg_address = self.core.portModeRegister(port)
reg_value = self.ram.read(reg_address)
mode = OUTPUT if reg_value & bitmask else INPUT
return mode
def write_mode(self, value):
"""write mode (0/1)"""
return self.api.pinMode(self.pin_number, value)
mode = property(read_mode, write_mode)
class PinFeature(object):
def __init__(self, define, register, core, ram, api):
self.A0 = define.get('A0')
self.define = define
self.register = register
self.core = core
self.ram = ram
self.api = api
@memoized
def get(self, name):
return (
ArduinoPin(
name,
self.count,
self.define,
self.register,
self.core,
self.ram,
self.api)
)
@property
@memoized
def count_analog(self):
"""Count of analog pins."""
return len(self.names_analog)
@property
@memoized
def count_digital(self):
"""Count of digital pins."""
return len(self.names_digital)
@property
@memoized
def count(self):
"""Count of all pins.
"""
return self.define.get('NUM_DIGITAL_PINS')
@property
def names(self):
"""List of all pin names."""
return self.names_digital + self.names_analog
@property
def names_digital(self):
"""List of digital pin names."""
A0 = self.A0
return ['D%s' % x for x in range(0, A0)]
@property
def names_analog(self):
"""List of analog pin names."""
A0 = self.A0
return (
['A%s' % (x - A0) for x in range(A0, self.count)]
)
|
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import array
import os
import struct
import six
from ._exceptions import *
from ._utils import validate_utf8
try:
# If wsaccel is available we use compiled routines to mask data.
from wsaccel.xormask import XorMaskerSimple
def _mask(_m, _d):
return XorMaskerSimple(_m).process(_d)
except ImportError:
    # wsaccel is not available, so we rely on the pure Python implementation.
def _mask(_m, _d):
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
if six.PY3:
return _d.tobytes()
else:
return _d.tostring()
__all__ = [
'ABNF', 'continuous_frame', 'frame_buffer',
'STATUS_NORMAL',
'STATUS_GOING_AWAY',
'STATUS_PROTOCOL_ERROR',
'STATUS_UNSUPPORTED_DATA_TYPE',
'STATUS_STATUS_NOT_AVAILABLE',
'STATUS_ABNORMAL_CLOSED',
'STATUS_INVALID_PAYLOAD',
'STATUS_POLICY_VIOLATION',
'STATUS_MESSAGE_TOO_BIG',
'STATUS_INVALID_EXTENSION',
'STATUS_UNEXPECTED_CONDITION',
'STATUS_BAD_GATEWAY',
'STATUS_TLS_HANDSHAKE_ERROR',
]
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_BAD_GATEWAY = 1014
STATUS_TLS_HANDSHAKE_ERROR = 1015
VALID_CLOSE_STATUS = (
STATUS_NORMAL,
STATUS_GOING_AWAY,
STATUS_PROTOCOL_ERROR,
STATUS_UNSUPPORTED_DATA_TYPE,
STATUS_INVALID_PAYLOAD,
STATUS_POLICY_VIOLATION,
STATUS_MESSAGE_TOO_BIG,
STATUS_INVALID_EXTENSION,
STATUS_UNEXPECTED_CONDITION,
STATUS_BAD_GATEWAY,
)
class ABNF(object):
"""
ABNF frame class.
see http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_CONT: "cont",
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
# data length threshold.
LENGTH_7 = 0x7e
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
opcode=OPCODE_TEXT, mask=1, data=""):
"""
Constructor for ABNF.
        Please check the RFC for the meaning of the arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
if data is None:
data = ""
self.data = data
self.get_mask_key = os.urandom
def validate(self, skip_utf8_validation=False):
"""
validate the ABNF frame.
skip_utf8_validation: skip utf8 validation.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise WebSocketProtocolException("rsv is not implemented, yet")
if self.opcode not in ABNF.OPCODES:
raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
if self.opcode == ABNF.OPCODE_PING and not self.fin:
raise WebSocketProtocolException("Invalid ping frame.")
if self.opcode == ABNF.OPCODE_CLOSE:
l = len(self.data)
if not l:
return
if l == 1 or l >= 126:
raise WebSocketProtocolException("Invalid close frame.")
if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
raise WebSocketProtocolException("Invalid close frame.")
code = 256 * \
six.byte2int(self.data[0:1]) + six.byte2int(self.data[1:2])
if not self._is_valid_close_status(code):
raise WebSocketProtocolException("Invalid close opcode.")
@staticmethod
def _is_valid_close_status(code):
return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)
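    # For example, 1000 (STATUS_NORMAL) and application-defined codes in the
    # 3000-4999 range are accepted, while 1005 and 1006 (which must never
    # appear on the wire) are rejected.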
def __str__(self):
return "fin=" + str(self.fin) \
+ " opcode=" + str(self.opcode) \
+ " data=" + str(self.data)
@staticmethod
def create_frame(data, opcode, fin=1):
"""
        Create a frame to send text, binary and other data.
        data: data to send. This is a string value (byte array).
            If opcode is OPCODE_TEXT and this value is unicode,
            it is encoded to utf-8 bytes automatically.
        opcode: operation code. Please see OPCODE_XXX.
        fin: fin flag. If set to 0, a continuation fragment is created.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, six.text_type):
data = data.encode("utf-8")
        # mask must be set when sending data from the client
return ABNF(fin, 0, 0, 0, opcode, 1, data)
def format(self):
"""
        Format this object into a string (byte array) to send to the server.
"""
if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7
| self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
| self.opcode)
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length)
frame_header = six.b(frame_header)
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e)
frame_header = six.b(frame_header)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f)
frame_header = six.b(frame_header)
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key):
s = ABNF.mask(mask_key, self.data)
if isinstance(mask_key, six.text_type):
mask_key = mask_key.encode('utf-8')
return mask_key + s
@staticmethod
def mask(mask_key, data):
"""
        Mask or unmask data. Just XOR each byte with the mask key.
        mask_key: a 4-byte string (bytes).
        data: data to mask/unmask.
"""
if data is None:
data = ""
if isinstance(mask_key, six.text_type):
mask_key = six.b(mask_key)
if isinstance(data, six.text_type):
data = six.b(data)
_m = array.array("B", mask_key)
_d = array.array("B", data)
return _mask(_m, _d)
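# A small round-trip sketch (not part of the original module): masking is a
# self-inverse XOR, so applying the same 4-byte key twice restores the data.
def _example_mask_roundtrip():
    key = b"abcd"
    masked = ABNF.mask(key, b"Hello")
    assert ABNF.mask(key, masked) == b"Hello"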
class frame_buffer(object):
_HEADER_MASK_INDEX = 5
_HEADER_LENGTH_INDEX = 6
def __init__(self, recv_fn, skip_utf8_validation):
self.recv = recv_fn
self.skip_utf8_validation = skip_utf8_validation
        # Buffers the packets from the layer beneath until the desired number
        # of bytes has been received.
self.recv_buffer = []
self.clear()
def clear(self):
self.header = None
self.length = None
self.mask = None
def has_received_header(self):
return self.header is None
def recv_header(self):
header = self.recv_strict(2)
b1 = header[0]
if six.PY2:
b1 = ord(b1)
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = header[1]
if six.PY2:
b2 = ord(b2)
has_mask = b2 >> 7 & 1
length_bits = b2 & 0x7f
self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)
def has_mask(self):
if not self.header:
return False
return self.header[frame_buffer._HEADER_MASK_INDEX]
def has_received_length(self):
return self.length is None
def recv_length(self):
bits = self.header[frame_buffer._HEADER_LENGTH_INDEX]
length_bits = bits & 0x7f
if length_bits == 0x7e:
v = self.recv_strict(2)
self.length = struct.unpack("!H", v)[0]
elif length_bits == 0x7f:
v = self.recv_strict(8)
self.length = struct.unpack("!Q", v)[0]
else:
self.length = length_bits
def has_received_mask(self):
return self.mask is None
def recv_mask(self):
self.mask = self.recv_strict(4) if self.has_mask() else ""
def recv_frame(self):
# Header
if self.has_received_header():
self.recv_header()
(fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header
# Frame length
if self.has_received_length():
self.recv_length()
length = self.length
# Mask
if self.has_received_mask():
self.recv_mask()
mask = self.mask
# Payload
payload = self.recv_strict(length)
if has_mask:
payload = ABNF.mask(mask, payload)
# Reset for next frame
self.clear()
frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
frame.validate(self.skip_utf8_validation)
return frame
def recv_strict(self, bufsize):
shortage = bufsize - sum(len(x) for x in self.recv_buffer)
while shortage > 0:
# Limit buffer size that we pass to socket.recv() to avoid
# fragmenting the heap -- the number of bytes recv() actually
# reads is limited by socket buffer and is relatively small,
# yet passing large numbers repeatedly causes lots of large
# buffers allocated and then shrunk, which results in
# fragmentation.
bytes_ = self.recv(min(16384, shortage))
self.recv_buffer.append(bytes_)
shortage -= len(bytes_)
unified = six.b("").join(self.recv_buffer)
if shortage == 0:
self.recv_buffer = []
return unified
else:
self.recv_buffer = [unified[bufsize:]]
return unified[:bufsize]
class continuous_frame(object):
def __init__(self, fire_cont_frame, skip_utf8_validation):
self.fire_cont_frame = fire_cont_frame
self.skip_utf8_validation = skip_utf8_validation
self.cont_data = None
self.recving_frames = None
def validate(self, frame):
if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT:
raise WebSocketProtocolException("Illegal frame")
if self.recving_frames and \
frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
raise WebSocketProtocolException("Illegal frame")
def add(self, frame):
if self.cont_data:
self.cont_data[1] += frame.data
else:
if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
self.recving_frames = frame.opcode
self.cont_data = [frame.opcode, frame.data]
if frame.fin:
self.recving_frames = None
def is_fire(self, frame):
return frame.fin or self.fire_cont_frame
def extract(self, frame):
data = self.cont_data
self.cont_data = None
frame.data = data[1]
if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not self.skip_utf8_validation and not validate_utf8(frame.data):
raise WebSocketPayloadException(
"cannot decode: " + repr(frame.data))
return [data[0], frame]
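# A usage sketch (not part of the original module): serialize a masked text
# frame with ABNF and parse it back through a frame_buffer fed from an
# in-memory stream.
def _example_frame_roundtrip():
    import io
    raw = io.BytesIO(ABNF.create_frame("ping", ABNF.OPCODE_TEXT).format())
    buf = frame_buffer(raw.read, skip_utf8_validation=True)
    frame = buf.recv_frame()
    assert frame.opcode == ABNF.OPCODE_TEXT
    assert frame.data == b"ping"
    return frame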
|
|
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
return '%s'
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
sql = "EXTRACT('dow' FROM %s) + 1" % field_name
else:
sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
return sql, params
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
if internal_type in ('IPAddressField', 'GenericIPAddressField'):
lookup = "HOST(%s)"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def prepare_sql_script(self, sql):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
tables_sql = ', '.join(
style.SQL_FIELD(self.quote_name(table)) for table in tables)
if allow_cascade:
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
style.SQL_KEYWORD('CASCADE'),
)]
else:
sql = ['%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
)]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.rel.through:
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))
)
)
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode('utf-8')
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def value_to_db_date(self, value):
return value
def value_to_db_datetime(self, value):
return value
def value_to_db_time(self, value):
return value
def value_to_db_ipaddress(self, value):
if value:
return Inet(value)
return None
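# A minimal sketch (not part of the original module) exercising the pure
# string-building helpers; none of them touch the database connection.
def _example_sql_helpers():
    ops = DatabaseOperations(connection=None)
    assert ops.quote_name('person') == '"person"'
    assert ops.bulk_insert_sql(['first', 'last'], 2) == 'VALUES (%s, %s), (%s, %s)'
    assert ops.date_trunc_sql('month', 'pub_date') == "DATE_TRUNC('month', pub_date)"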
|
|
"""Server-side implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
.. warning::
The WebSocket protocol was recently finalized as `RFC 6455
<http://tools.ietf.org/html/rfc6455>`_ and is not yet supported in
all browsers. Refer to http://caniuse.com/websockets for details
on compatibility. In addition, during development the protocol
went through several incompatible versions, and some browsers only
support older versions. By default this module only supports the
latest version of the protocol, but optional support for an older
version (known as "draft 76" or "hixie-76") can be enabled by
overriding `WebSocketHandler.allow_draft76` (see that method's
documentation for caveats).
"""
from __future__ import absolute_import, division, with_statement
# Author: Jacob Kristhammar, 2010
import array
import datetime
import functools
import hashlib
import logging
import struct
import base64
import tornado.escape
import tornado.web
from tornado.util import bytes_type, b
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override on_message to handle incoming messages. You can also override
open and on_close to handle opened and closed connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example Web Socket handler that echoes all received messages
    back to the client::
class EchoWebSocket(websocket.WebSocketHandler):
def open(self):
print "WebSocket opened"
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print "WebSocket closed"
Web Sockets are not standard HTTP connections. The "handshake" is HTTP,
but after the handshake, the protocol is message-based. Consequently,
most of the Tornado HTTP facilities are not available in handlers of this
type. The only communication methods available to you are write_message()
and close(). Likewise, your request handler class should
    implement the open() method rather than get() or post().
If you map the handler above to "/websocket" in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
"""
def __init__(self, application, request, **kwargs):
tornado.web.RequestHandler.__init__(self, application, request,
**kwargs)
self.stream = request.connection.stream
self.ws_connection = None
def _execute(self, transforms, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Websocket only supports GET method
if self.request.method != 'GET':
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 405 Method Not Allowed\r\n\r\n"
))
self.stream.close()
return
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 400 Bad Request\r\n\r\n"
"Can \"Upgrade\" only to \"WebSocket\"."
))
self.stream.close()
return
# Connection header should be upgrade. Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 400 Bad Request\r\n\r\n"
"\"Connection\" must be \"Upgrade\"."
))
self.stream.close()
return
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
self.ws_connection = WebSocketProtocol13(self)
self.ws_connection.accept_connection()
elif (self.allow_draft76() and
"Sec-WebSocket-Version" not in self.request.headers):
self.ws_connection = WebSocketProtocol76(self)
self.ws_connection.accept_connection()
else:
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 8\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
"""
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def open(self):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def on_close(self):
"""Invoked when the WebSocket is closed."""
pass
def close(self):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
"""
self.ws_connection.close()
def allow_draft76(self):
"""Override to enable support for the older "draft76" protocol.
The draft76 version of the websocket protocol is disabled by
default due to security concerns, but it can be enabled by
overriding this method to return True.
Connections using the draft76 protocol do not support the
``binary=True`` flag to `write_message`.
Support for the draft76 protocol is deprecated and will be
removed in a future version of Tornado.
"""
return False
def get_websocket_scheme(self):
"""Return the url scheme used for this request, either "ws" or "wss".
This is normally decided by HTTPServer, but applications
may wish to override this if they are using an SSL proxy
that does not provide the X-Scheme header as understood
by HTTPServer.
Note that this is only used by the draft76 protocol.
"""
return "wss" if self.request.protocol == "https" else "ws"
def async_callback(self, callback, *args, **kwargs):
"""Wrap callbacks with this if they are used on asynchronous requests.
Catches exceptions properly and closes this WebSocket if an exception
is uncaught. (Note that this is usually unnecessary thanks to
`tornado.stack_context`)
"""
return self.ws_connection.async_callback(callback, *args, **kwargs)
def _not_supported(self, *args, **kwargs):
raise Exception("Method not supported for Web Sockets")
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
self.on_close()
for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method, WebSocketHandler._not_supported)
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def async_callback(self, callback, *args, **kwargs):
"""Wrap callbacks with this if they are used on asynchronous requests.
Catches exceptions properly and closes this WebSocket if an exception
is uncaught.
"""
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception:
logging.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
return wrapper
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class WebSocketProtocol76(WebSocketProtocol):
"""Implementation of the WebSockets protocol, version hixie-76.
This class provides basic functionality to process WebSockets requests as
specified in
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
"""
def __init__(self, handler):
WebSocketProtocol.__init__(self, handler)
self.challenge = None
self._waiting = None
def accept_connection(self):
try:
self._handle_websocket_headers()
except ValueError:
logging.debug("Malformed WebSocket request received")
self._abort()
return
scheme = self.handler.get_websocket_scheme()
# draft76 only allows a single subprotocol
subprotocol_header = ''
subprotocol = self.request.headers.get("Sec-WebSocket-Protocol", None)
if subprotocol:
selected = self.handler.select_subprotocol([subprotocol])
if selected:
assert selected == subprotocol
subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected
# Write the initial headers before attempting to read the challenge.
# This is necessary when using proxies (such as HAProxy), which
# need to see the Upgrade headers before passing through the
# non-HTTP traffic that follows.
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Server: TornadoServer/%(version)s\r\n"
"Sec-WebSocket-Origin: %(origin)s\r\n"
"Sec-WebSocket-Location: %(scheme)s://%(host)s%(uri)s\r\n"
"%(subprotocol)s"
"\r\n" % (dict(
version=tornado.version,
origin=self.request.headers["Origin"],
scheme=scheme,
host=self.request.host,
uri=self.request.uri,
subprotocol=subprotocol_header))))
self.stream.read_bytes(8, self._handle_challenge)
def challenge_response(self, challenge):
"""Generates the challenge response that's needed in the handshake
The challenge parameter should be the raw bytes as sent from the
client.
"""
key_1 = self.request.headers.get("Sec-Websocket-Key1")
key_2 = self.request.headers.get("Sec-Websocket-Key2")
try:
part_1 = self._calculate_part(key_1)
part_2 = self._calculate_part(key_2)
except ValueError:
raise ValueError("Invalid Keys/Challenge")
return self._generate_challenge_response(part_1, part_2, challenge)
def _handle_challenge(self, challenge):
try:
challenge_response = self.challenge_response(challenge)
except ValueError:
logging.debug("Malformed key data in WebSocket request")
self._abort()
return
self._write_response(challenge_response)
def _write_response(self, challenge):
self.stream.write(challenge)
self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs)
self._receive_message()
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Origin", "Host", "Sec-Websocket-Key1",
"Sec-Websocket-Key2")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
def _calculate_part(self, key):
"""Processes the key headers and calculates their key value.
        Raises ValueError when fed an invalid key."""
number = int(''.join(c for c in key if c.isdigit()))
spaces = len([c for c in key if c.isspace()])
try:
key_number = number // spaces
except (ValueError, ZeroDivisionError):
raise ValueError
return struct.pack(">I", key_number)
def _generate_challenge_response(self, part_1, part_2, part_3):
m = hashlib.md5()
m.update(part_1)
m.update(part_2)
m.update(part_3)
return m.digest()
def _receive_message(self):
self.stream.read_bytes(1, self._on_frame_type)
def _on_frame_type(self, byte):
frame_type = ord(byte)
if frame_type == 0x00:
self.stream.read_until(b("\xff"), self._on_end_delimiter)
elif frame_type == 0xff:
self.stream.read_bytes(1, self._on_length_indicator)
else:
self._abort()
def _on_end_delimiter(self, frame):
if not self.client_terminated:
self.async_callback(self.handler.on_message)(
frame[:-1].decode("utf-8", "replace"))
if not self.client_terminated:
self._receive_message()
def _on_length_indicator(self, byte):
if ord(byte) != 0x00:
self._abort()
return
self.client_terminated = True
self.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
raise ValueError(
"Binary messages not supported by this version of websockets")
if isinstance(message, unicode):
message = message.encode("utf-8")
assert isinstance(message, bytes_type)
self.stream.write(b("\x00") + message + b("\xff"))
def close(self):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
self.stream.write("\xff\x00")
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
self._waiting = self.stream.io_loop.add_timeout(
datetime.timedelta(seconds=5), self._abort)
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
def __init__(self, handler):
WebSocketProtocol.__init__(self, handler)
self._final_frame = False
self._frame_opcode = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
logging.debug("Malformed WebSocket request received")
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
def _challenge_response(self):
sha1 = hashlib.sha1()
sha1.update(tornado.escape.utf8(
self.request.headers.get("Sec-Websocket-Key")))
sha1.update(b("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")) # Magic value
return tornado.escape.native_str(base64.b64encode(sha1.digest()))
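    # For example, the RFC 6455 sample key "dGhlIHNhbXBsZSBub25jZQ==" produces
    # the accept value "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".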
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s"
"\r\n" % (self._challenge_response(), subprotocol_header)))
self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs)
self._receive_frame()
def _write_frame(self, fin, opcode, data):
if fin:
finbit = 0x80
else:
finbit = 0
frame = struct.pack("B", finbit | opcode)
l = len(data)
if l < 126:
frame += struct.pack("B", l)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126, l)
else:
frame += struct.pack("!BQ", 127, l)
frame += data
self.stream.write(frame)
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes_type)
self._write_frame(True, opcode, message)
def _receive_frame(self):
self.stream.read_bytes(2, self._on_frame_start)
def _on_frame_start(self, data):
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & 0x80
reserved_bits = header & 0x70
self._frame_opcode = header & 0xf
self._frame_opcode_is_control = self._frame_opcode & 0x8
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
if not (payloadlen & 0x80):
# Unmasked frame -> abort connection
self._abort()
return
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
if payloadlen < 126:
self._frame_length = payloadlen
self.stream.read_bytes(4, self._on_masking_key)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
def _on_frame_length_16(self, data):
self._frame_length = struct.unpack("!H", data)[0]
self.stream.read_bytes(4, self._on_masking_key)
def _on_frame_length_64(self, data):
self._frame_length = struct.unpack("!Q", data)[0]
self.stream.read_bytes(4, self._on_masking_key)
def _on_masking_key(self, data):
self._frame_mask = array.array("B", data)
self.stream.read_bytes(self._frame_length, self._on_frame_data)
def _on_frame_data(self, data):
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ self._frame_mask[i % 4]
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += unmasked
if self._final_frame:
opcode = self._fragmented_message_opcode
unmasked = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = unmasked
if self._final_frame:
self._handle_message(opcode, unmasked.tostring())
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if opcode == 0x1:
# UTF-8 data
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self.async_callback(self.handler.on_message)(decoded)
elif opcode == 0x2:
# Binary data
self.async_callback(self.handler.on_message)(data)
elif opcode == 0x8:
# Close
self.client_terminated = True
self.close()
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
pass
else:
self._abort()
def close(self):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
self._write_frame(True, 0x8, b(""))
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
datetime.timedelta(seconds=5), self._abort)
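# A minimal application sketch (not part of the original module) that wires an
# echo handler into a Tornado application; the URL path and port are
# illustrative only.
def _example_echo_application():
    class EchoWebSocket(WebSocketHandler):
        def on_message(self, message):
            self.write_message(u"You said: " + message)
    application = tornado.web.Application([(r"/websocket", EchoWebSocket)])
    application.listen(8888)
    # tornado.ioloop.IOLoop.instance().start() would then serve requests.
    return application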
|
|
from cuttsum.data import get_resource_manager
import re
from itertools import izip
import scipy.cluster.hierarchy as hac
import fastcluster
from sklearn.preprocessing import Normalizer
import numpy as np
import os
import gzip
from datetime import datetime, timedelta
from cuttsum.misc import ProgressBar
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
from cuttsum.salience import SaliencePredictionAggregator
class HACSummarizer(object):
def __init__(self):
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'hac-summaries')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
# def get_tsv_dir(self, prefix, feature_set):
# return os.path.join(self.dir_, prefix + "." + feature_set.fs_name())
def get_tsv_path(self, event, cutoff):
#tsv_dir = self.get_tsv_dir(prefix, feature_set, cutoff)
return os.path.join(self.dir_,
"hac-{}-cutoff{}.tsv.gz".format(event.fs_name(), cutoff))
def get_dataframe(self, event, cutoff):
tsv = self.get_tsv_path(event, cutoff)
if not os.path.exists(tsv):
return None
else:
with gzip.open(tsv, u'r') as f:
df = pd.io.parsers.read_csv(
f, sep='\t', quoting=3, header=0)
return df
def make_summary(self, event, corpus, prefix, feature_set, cutoff):
string_res = get_resource_manager(u'SentenceStringsResource')
lvec_res = get_resource_manager(u'SentenceLatentVectorsResource')
spa = SaliencePredictionAggregator()
tsv_path = self.get_tsv_path(event, cutoff)
updates = []
epoch = datetime.utcfromtimestamp(0)
for hour in event.list_event_hours():
hp1 = hour + timedelta(hours=1)
timestamp = str(int((hp1 - epoch).total_seconds()))
string_df = string_res.get_dataframe(event, hour)
lvec_df = lvec_res.get_dataframe(event, hour)
sal_df = spa.get_dataframe(event, hour, prefix, feature_set)
if string_df is None or lvec_df is None or sal_df is None:
continue
string_df = string_df.drop_duplicates(
subset=[u'stream id', u'sentence id'])
lvec_df = lvec_df.drop_duplicates(
subset=[u'stream id', u'sentence id'])
sal_df = sal_df.drop_duplicates(
subset=[u'stream id', u'sentence id'])
string_df.sort([u"stream id", u"sentence id"], inplace=True)
lvec_df.sort([u"stream id", u"sentence id"], inplace=True)
sal_df.sort([u"stream id", u"sentence id"], inplace=True)
X = lvec_df.as_matrix()[:,2:].astype(np.float64)
good_rows = np.where(X.any(axis=1))[0]
string_df = string_df.iloc[good_rows]
lvec_df = lvec_df.iloc[good_rows]
sal_df = sal_df.iloc[good_rows]
assert len(string_df) == len(lvec_df)
assert len(string_df) == len(sal_df)
n_sents = len(string_df)
for i in xrange(n_sents):
assert string_df[u'stream id'].iloc[i] == \
lvec_df[u'stream id'].iloc[i]
assert string_df[u'stream id'].iloc[i] == \
sal_df[u'stream id'].iloc[i]
assert string_df[u'sentence id'].iloc[i] == \
lvec_df[u'sentence id'].iloc[i]
assert string_df[u'sentence id'].iloc[i] == \
sal_df[u'sentence id'].iloc[i]
lvec_df.reset_index(drop=True, inplace=True)
string_df.reset_index(drop=True, inplace=True)
sal_df.reset_index(drop=True, inplace=True)
good_rows = []
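            # Heuristic boilerplate filter: keep sentences with more than 9
            # words that come from documents with fewer than 200 sentences and
            # contain few social-media or web-technology tokens.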
for name, doc in string_df.groupby("stream id"):
for rname, row in doc.iterrows():
scstring = row["streamcorpus"]
words = len(re.findall(r'\b[^\W\d_]+\b', scstring))
socs = len(re.findall(
r'Digg|del\.icio\.us|Facebook|Kwoff|Myspace',
scstring))
langs = len(re.findall(
r'Flash|JavaScript|CSS', scstring, re.I))
assert lvec_df.loc[rname][u'sentence id'] == \
row[u'sentence id']
assert lvec_df.loc[rname][u'stream id'] == \
row[u'stream id']
assert sal_df.loc[rname][u'sentence id'] == \
row[u'sentence id']
assert sal_df.loc[rname][u'stream id'] == \
row[u'stream id']
if words > 9 and len(doc) < 200 \
and socs < 2 and langs < 2:
good_rows.append(rname)
lvec_df = lvec_df.loc[good_rows]
string_df = string_df.loc[good_rows]
sal_df = sal_df.loc[good_rows]
n_sents = len(string_df)
if n_sents < 10:
continue
for i in xrange(n_sents):
assert string_df[u'stream id'].iloc[i] == \
lvec_df[u'stream id'].iloc[i]
assert string_df[u'stream id'].iloc[i] == \
sal_df[u'stream id'].iloc[i]
assert string_df[u'sentence id'].iloc[i] == \
lvec_df[u'sentence id'].iloc[i]
assert string_df[u'sentence id'].iloc[i] == \
sal_df[u'sentence id'].iloc[i]
X = lvec_df.as_matrix()[:,2:].astype(np.float64)
S = sal_df.as_matrix()[:,2:].astype(np.float64)
s = np.mean(S, axis=1)
#Xn = Normalizer().fit_transform(X)
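            # Single-linkage agglomerative clustering over the latent vectors;
            # cutting the dendrogram at the given distance threshold yields the
            # flat cluster assignments.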
z = fastcluster.linkage(X,
method='single', metric='euclidean', preserve_input=True)
clusters = hac.fcluster(z, cutoff, 'distance')
II = np.arange(n_sents)
#print set(clusters)
for cluster_id, cluster in enumerate(set(clusters)):
# print cluster
# print (clusters == cluster).shape
# print II.shape
ii = II[clusters == cluster]
#print ii.shape
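                # Pick the cluster exemplar: the sentence whose latent vector
                # is closest (in squared Euclidean distance) to the cluster
                # centroid.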
C = X[clusters == cluster,:]
u = np.mean(C, axis=0)
dist_2 = np.sum((C - u)**2, axis=1)
cidx = np.argmin(dist_2)
#cidx = np.argmax(cosine_similarity(C, u))
e = ii[cidx]
#
#Cs = s[clusters == cluster]
# e = ii[np.argmax(Cs)],
cluster_size = C.shape[0]
scstring = string_df.iloc[e][u'streamcorpus']
stream_id = string_df.iloc[e][u'stream id']
sentence_id = str(string_df.iloc[e][u'sentence id'])
updates.append({"stream id": stream_id,
"sentence id": sentence_id,
"hour": hour,
"timestamp": timestamp,
"cluster size": cluster_size,
"string": scstring})
df = pd.DataFrame(updates,
columns=["stream id", "sentence id", "hour", "timestamp",
"cluster size", "string"])
with gzip.open(tsv_path, u'w') as f:
df.to_csv(f, sep='\t', index=False, index_label=False)
|
|
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Assignment tests, cover most forms of them. """
from __future__ import print_function
import sys
# Tests are dirty on purpose.
#
# pylint: disable=broad-except,global-variable-undefined,redeclared-assigned-name
# pylint: disable=global-variable-not-assigned,invalid-name,self-assigning-variable
def someFunction():
a = 2
print("Simple assignment to variable:", a)
b = c = 3
print("Assignment to 2 variables", b, c)
z = [1, 2, 3]
z[2] = z[1] = 5
print("Assignment to list subscripts:", z)
d, e = 1, 2
print("Assignment to variable tuple:", d, e)
[f, g] = 7, 9
print("Assignment to variable list:", f, g)
j = [h, i] = (7, 9)
print("Complex Assignment from variable list:", j, type(j), h, i)
a, (b, c) = 1, (2, 3)
print("Assignment to nested tuples:", a, b, c)
v = [1, 2, 3, 4]
v[2:3] = (8, 9)
print("Assignment to list slice", v)
def varargsFunction(*args):
f1, f2, f3, f4 = args
print("Assignment from list", f1, f2, f3, f4)
def otherFunction():
class Iterable:
def __iter__(self):
return iter(range(3))
a, b, c = Iterable()
print("Assignments from iterable", a, b, c)
print("Assignments from too small iterable", end=" ")
try:
f, g = (1,)
except Exception as e:
print("gave", type(e), repr(e))
try:
print(f)
except UnboundLocalError:
print("Variable f is untouched")
try:
print(g)
except UnboundLocalError:
print("Variable g is untouched")
print("Assignments from too large iterable", end=" ")
try:
d, j = 1, 2, 3
except Exception as e:
print("gave", type(e), repr(e))
try:
print(d)
except UnboundLocalError:
print("Variable d is untouched")
try:
print(j)
except UnboundLocalError:
print("Variable j is untouched")
class BasicIterClass:
def __init__(self, n):
self.n = n
self.i = 0
def __next__(self):
res = self.i
if res >= self.n:
raise StopIteration
self.i = res + 1
return res
if sys.version_info[0] < 3:
def next(self):
return self.__next__()
class IteratingSequenceClass:
def __init__(self, n):
self.n = n
def __iter__(self):
return BasicIterClass(self.n)
print("Exception from iterating over too short class:", end=" ")
try:
a, b, c = IteratingSequenceClass(2)
except ValueError:
print("gave", sys.exc_info())
def anotherFunction():
d = {}
print("Assignment to dictionary with comma subscript:", end="")
# d["f"] = 3
d["a", "b"] = 6
d["c", "b"] = 9
print(sorted(d.items()))
def swapVariables():
print("Strange swap form:")
a = 1
b = 2
a, b, a = b, a, b
print(a, b)
def interruptedUnpack():
a = 1
b = 2
print("Assignment from a too short tuple to multiple targets:", end=" ")
try:
s = (a,)
c, d = s
except ValueError as e:
print("gives ValueError", repr(e))
try:
print(c)
except UnboundLocalError as e:
print("and then nothing is assigned:", repr(e))
else:
del d
del a, b
z = []
try:
a, z.unknown, b = 1, 2, 3
except AttributeError:
print("Interrupted unpack, leaves value assigned", a)
def multiTargetInterrupt():
a = 1
b = 2
print("Multiple, overlapping targets", end="")
d = c, d = a, b
print(d, c, end="")
del c
del d
c, d = d = a, b
print(d, c)
print("Error during multiple assignments", end="")
del c
del d
e = 9
z = []
try:
c, d = e, z.a = a, b
except AttributeError:
print("having attribute error", c, d, e)
del c
del d
e = 9
print("Error during multiple assignments", end="")
try:
c, d = z.a, e = a, b
except AttributeError:
print("having attribute error", c, d, e)
def optimizeableTargets():
a = [1, 2]
a[int(1)] = 3
print("Optimizable slice operation, results in", a)
def complexDel():
a = b = c = d = 1
del a, b, (c, d)
try:
print(c)
except UnboundLocalError as e:
print("yes, del worked", repr(e))
def sliceDel():
# Python3 ranges are not lists.
a = list(range(6))
del a[2:4]
print("Del slice operation, results in", a)
def globalErrors():
global unassigned_1, unassigned_2
try:
unassigned_1 = unassigned_1
except NameError as e:
print("Accessing unassigned global gives", repr(e))
try:
del unassigned_2
except NameError as e:
print("Del on unassigned global gives", repr(e))
someFunction()
varargsFunction(1, 2, 3, 4)
otherFunction()
anotherFunction()
swapVariables()
interruptedUnpack()
multiTargetInterrupt()
optimizeableTargets()
complexDel()
sliceDel()
globalErrors()
|
|
import hashlib
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.query import QuerySet
from generic_aggregation import generic_annotate
from .utils import is_gfk, recommended_items
class RatedItemBase(models.Model):
score = models.FloatField(default=0, db_index=True)
user = models.ForeignKey(User, related_name="%(class)ss", on_delete=models.CASCADE)
hashed = models.CharField(max_length=40, editable=False, db_index=True)
class Meta:
abstract = True
def __str__(self):
return "%s rated %s by %s" % (self.content_object, self.score, self.user)
def save(self, *args, **kwargs):
self.hashed = self.generate_hash()
super(RatedItemBase, self).save(*args, **kwargs)
def generate_hash(self):
content_field = self._meta.get_field("content_object")
related_object = getattr(self, content_field.name)
uniq = "%s.%s" % (related_object._meta, related_object.pk)
return hashlib.sha1(uniq.encode("ascii")).hexdigest()
@classmethod
def lookup_kwargs(cls, instance):
return {"content_object": instance}
@classmethod
def base_kwargs(cls, model_class):
return {}
class RatedItem(RatedItemBase):
object_id = models.IntegerField()
content_type = models.ForeignKey(
ContentType,
related_name="rated_items",
on_delete=models.CASCADE,
)
content_object = GenericForeignKey()
@classmethod
def lookup_kwargs(cls, instance):
return {"object_id": instance.pk, "content_type": ContentType.objects.get_for_model(instance)}
@classmethod
def base_kwargs(cls, model_class):
return {"content_type": ContentType.objects.get_for_model(model_class)}
# this goes on your model
class Ratings:
def __init__(self, rating_model=None):
self.rating_model = rating_model or RatedItem
def contribute_to_class(self, cls, name):
# set up the ForeignRelatedObjectsDescriptor right here
setattr(cls, name, _RatingsDescriptor(cls, self.rating_model, name))
setattr(cls, "_ratings_field", name)
class RatingsQuerySet(QuerySet):
def __init__(self, model=None, query=None, using=None, hints=None, rated_model=None):
self.rated_model = rated_model
super(RatingsQuerySet, self).__init__(model, query, using, hints)
def _clone(self, *args, **kwargs):
instance = super(RatingsQuerySet, self)._clone(*args, **kwargs)
instance.rated_model = self.rated_model
return instance
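# Note on order_by_rating below: it aggregates rating scores per rated object
# and returns the rated model's queryset annotated with that aggregate and
# ordered by it, using generic_annotate for generic foreign keys and a plain
# reverse-FK annotate otherwise.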
def order_by_rating(self, aggregator=models.Sum, descending=True, queryset=None, alias="score"):
related_field = self.model._meta.get_field("content_object")
if queryset is None:
queryset = self.rated_model._default_manager.all()
ordering = descending and "-%s" % alias or alias
if not is_gfk(related_field):
query_name = related_field.related_query_name()
if len(self.query.where.children):
queryset = queryset.filter(**{"%s__pk__in" % query_name: self.values_list("pk")})
return queryset.annotate(**{alias: aggregator("%s__score" % query_name)}).order_by(ordering)
else:
return generic_annotate(queryset, self, aggregator("score"), related_field, alias=alias).order_by(ordering)
class _RatingsDescriptor(models.Manager):
def __init__(self, rated_model, rating_model, rating_field):
self.rated_model = rated_model
self.rating_model = rating_model
self.rating_field = rating_field
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.create_manager(instance, self.rating_model._default_manager.__class__)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
manager.add(*value)
def get_queryset(self):
base_filters = self.rating_model.base_kwargs(self.rated_model)
qs = RatingsQuerySet(self.rating_model, rated_model=self.rated_model)
return qs.filter(**base_filters)
def delete_manager(self, instance):
"""
Returns a queryset based on the related model's base manager (rather
than the default manager, as returned by __get__). Used by
Model.delete().
"""
return self.create_manager(instance, self.rating_model._base_manager.__class__)
def create_manager(self, instance, superclass):
"""
Dynamically create a RelatedManager to handle the back side of the (G)FK
"""
rel_model = self.rating_model
rated_model = self.rated_model
class RelatedManager(superclass):
def get_queryset(self):
qs = RatingsQuerySet(rel_model, rated_model=rated_model)
return qs.filter(**(self.core_filters))
def add(self, *objs):
lookup_kwargs = rel_model.lookup_kwargs(instance)
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
for (k, v) in lookup_kwargs.items():
setattr(obj, k, v)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update(rel_model.lookup_kwargs(instance))
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs.update(rel_model.lookup_kwargs(instance))
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
def remove(self, *objs):
for obj in objs:
# Is obj actually part of this descriptor set?
if obj not in self.all():
raise rel_model.DoesNotExist("%r is not related to %r." % (obj, instance))
obj.delete()
remove.alters_data = True
def clear(self):
self.all().delete()
clear.alters_data = True
def rate(self, user, score):
rating, created = self.get_or_create(user=user)
if created or score != rating.score:
rating.score = score
rating.save()
return rating
def unrate(self, user):
return self.filter(user=user, **rel_model.lookup_kwargs(instance)).delete()
def perform_aggregation(self, aggregator):
score = self.all().aggregate(agg=aggregator("score"))
return score["agg"]
def cumulative_score(self):
# simply the sum of all scores, useful for +1/-1
return self.perform_aggregation(models.Sum)
def average_score(self):
# the average of all the scores, useful for 1-5
return self.perform_aggregation(models.Avg)
def standard_deviation(self):
# the standard deviation of all the scores, useful for 1-5
return self.perform_aggregation(models.StdDev)
def variance(self):
# the variance of all the scores, useful for 1-5
return self.perform_aggregation(models.Variance)
def similar_items(self):
return SimilarItem.objects.get_for_item(instance)
manager = RelatedManager()
manager.core_filters = rel_model.lookup_kwargs(instance)
manager.model = rel_model
return manager
def update_similar_items(self):
from ratings.utils import calculate_similar_items
calculate_similar_items(self.all())
def similar_items(self, item):
return SimilarItem.objects.get_for_item(item)
def recommended_items(self, user):
return recommended_items(self.all(), user)
def order_by_rating(self, aggregator=models.Sum, descending=True, queryset=None, alias="score"):
return self.all().order_by_rating(aggregator, descending, queryset, alias)
class SimilarItemManager(models.Manager):
def get_for_item(self, instance):
ctype = ContentType.objects.get_for_model(instance)
qs = self.filter(content_type=ctype, object_id=instance.pk)
return qs.order_by("-score")
class SimilarItem(models.Model):
content_type = models.ForeignKey(
ContentType,
related_name="similar_items",
on_delete=models.CASCADE,
)
object_id = models.IntegerField()
content_object = GenericForeignKey("content_type", "object_id")
similar_content_type = models.ForeignKey(
ContentType,
related_name="similar_items_set",
on_delete=models.CASCADE,
)
similar_object_id = models.IntegerField()
similar_object = GenericForeignKey("similar_content_type", "similar_object_id")
score = models.FloatField(default=0)
objects = SimilarItemManager()
def __str__(self):
return "%s (%s)" % (self.similar_object, self.score)
|
|
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import functools
import signal
import six
from stevedore import driver
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils as linux_utils
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_constants
from neutron.common import ipv6_utils
from neutron.common import utils
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('pd_dhcp_driver',
default='dibbler',
help=_('Service to handle DHCPv6 Prefix delegation.')),
]
cfg.CONF.register_opts(OPTS)
class PrefixDelegation(object):
def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb,
agent_conf):
self.context = context
self.pmon = pmon
self.intf_driver = intf_driver
self.notifier = notifier
self.routers = {}
self.pd_update_cb = pd_update_cb
self.agent_conf = agent_conf
self.pd_dhcp_driver = driver.DriverManager(
namespace='neutron.agent.linux.pd_drivers',
name=agent_conf.prefix_delegation_driver,
).driver
registry.subscribe(add_router,
resources.ROUTER,
events.BEFORE_CREATE)
registry.subscribe(remove_router,
resources.ROUTER,
events.AFTER_DELETE)
self._get_sync_data()
@utils.synchronized("l3-agent-pd")
def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
router = self.routers.get(router_id)
if router is None:
return
pd_info = router['subnets'].get(subnet_id)
if not pd_info:
pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac)
router['subnets'][subnet_id] = pd_info
pd_info.bind_lla = self._get_lla(mac)
if pd_info.sync:
pd_info.mac = mac
pd_info.old_prefix = prefix
else:
self._add_lla(router, pd_info.get_bind_lla_with_mask())
def _delete_pd(self, router, pd_info):
self._delete_lla(router, pd_info.get_bind_lla_with_mask())
if pd_info.client_started:
pd_info.driver.disable(self.pmon, router['ns_name'])
@utils.synchronized("l3-agent-pd")
def disable_subnet(self, router_id, subnet_id):
prefix_update = {}
router = self.routers.get(router_id)
if not router:
return
pd_info = router['subnets'].get(subnet_id)
if not pd_info:
return
self._delete_pd(router, pd_info)
prefix_update[subnet_id] = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
del router['subnets'][subnet_id]
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
@utils.synchronized("l3-agent-pd")
def update_subnet(self, router_id, subnet_id, prefix):
router = self.routers.get(router_id)
if router is not None:
pd_info = router['subnets'].get(subnet_id)
if pd_info and pd_info.old_prefix != prefix:
old_prefix = pd_info.old_prefix
pd_info.old_prefix = prefix
return old_prefix
@utils.synchronized("l3-agent-pd")
def add_gw_interface(self, router_id, gw_ifname):
router = self.routers.get(router_id)
prefix_update = {}
if not router:
return
router['gw_interface'] = gw_ifname
for subnet_id, pd_info in six.iteritems(router['subnets']):
# The gateway is added after the internal router ports.
# If a PD is being synced and a prefix is already available, send an
# update when the prefix is out of sync; if no prefix is available,
# bind the LLA so the PD client can be started.
bind_lla_with_mask = pd_info.get_bind_lla_with_mask()
if pd_info.sync:
pd_info.sync = False
if pd_info.client_started:
if pd_info.prefix != pd_info.old_prefix:
prefix_update[subnet_id] = pd_info.prefix
else:
self._delete_lla(router, bind_lla_with_mask)
self._add_lla(router, bind_lla_with_mask)
else:
self._add_lla(router, bind_lla_with_mask)
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
def delete_router_pd(self, router):
prefix_update = {}
for subnet_id, pd_info in six.iteritems(router['subnets']):
self._delete_lla(router, pd_info.get_bind_lla_with_mask())
if pd_info.client_started:
pd_info.driver.disable(self.pmon, router['ns_name'])
pd_info.prefix = None
pd_info.client_started = False
prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
prefix_update[subnet_id] = prefix
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
@utils.synchronized("l3-agent-pd")
def remove_gw_interface(self, router_id):
router = self.routers.get(router_id)
if router is not None:
router['gw_interface'] = None
self.delete_router_pd(router)
@utils.synchronized("l3-agent-pd")
def sync_router(self, router_id):
router = self.routers.get(router_id)
if router is not None and router['gw_interface'] is None:
self.delete_router_pd(router)
@utils.synchronized("l3-agent-pd")
def remove_stale_ri_ifname(self, router_id, stale_ifname):
router = self.routers.get(router_id)
if router is not None:
for subnet_id, pd_info in router['subnets'].items():
if pd_info.ri_ifname == stale_ifname:
self._delete_pd(router, pd_info)
del router['subnets'][subnet_id]
@staticmethod
def _get_lla(mac):
lla = ipv6_utils.get_ipv6_addr_by_EUI64(l3_constants.IPV6_LLA_PREFIX,
mac)
return lla
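# Illustrative example (values are assumptions, not taken from this module):
# for mac "00:16:3e:33:44:55", EUI-64 flips the universal/local bit and
# inserts ff:fe, yielding the link-local address "fe80::216:3eff:fe33:4455".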
def _get_llas(self, gw_ifname, ns_name):
try:
return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name)
except RuntimeError:
# The error message was already logged as part of the driver call.
# This can happen if the gw_ifname was removed;
# simply return and let the thread exit.
return
def _add_lla(self, router, lla_with_mask):
if router['gw_interface']:
self.intf_driver.add_ipv6_addr(router['gw_interface'],
lla_with_mask,
router['ns_name'],
'link')
# There is a delay before the LLA becomes active because the kernel
# runs DAD (duplicate address detection) to ensure the LLA is unique.
# Spawn a thread to wait for the interface to be ready
self._spawn_lla_thread(router['gw_interface'],
router['ns_name'],
lla_with_mask)
def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask):
eventlet.spawn_n(self._ensure_lla_task,
gw_ifname,
ns_name,
lla_with_mask)
def _delete_lla(self, router, lla_with_mask):
if lla_with_mask and router['gw_interface']:
try:
self.intf_driver.delete_ipv6_addr(router['gw_interface'],
lla_with_mask,
router['ns_name'])
except RuntimeError:
# Ignore error if the lla doesn't exist
pass
def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask):
# This should not take long unless the DAD test failed;
# in that case, the subnet would never be assigned a prefix.
linux_utils.wait_until_true(functools.partial(self._lla_available,
gw_ifname,
ns_name,
lla_with_mask),
timeout=l3_constants.LLA_TASK_TIMEOUT,
sleep=2)
def _lla_available(self, gw_ifname, ns_name, lla_with_mask):
llas = self._get_llas(gw_ifname, ns_name)
if self._is_lla_active(lla_with_mask, llas):
LOG.debug("LLA %s is active now" % lla_with_mask)
self.pd_update_cb()
return True
@staticmethod
def _is_lla_active(lla_with_mask, llas):
for lla in llas:
if lla_with_mask == lla['cidr']:
return not lla['tentative']
return False
@utils.synchronized("l3-agent-pd")
def process_prefix_update(self):
LOG.debug("Processing IPv6 PD Prefix Update")
prefix_update = {}
for router_id, router in six.iteritems(self.routers):
if not router['gw_interface']:
continue
llas = None
for subnet_id, pd_info in six.iteritems(router['subnets']):
if pd_info.client_started:
prefix = pd_info.driver.get_prefix()
if prefix != pd_info.prefix:
pd_info.prefix = prefix
prefix_update[subnet_id] = prefix
else:
if not llas:
llas = self._get_llas(router['gw_interface'],
router['ns_name'])
if self._is_lla_active(pd_info.get_bind_lla_with_mask(),
llas):
if not pd_info.driver:
pd_info.driver = self.pd_dhcp_driver(
router_id, subnet_id, pd_info.ri_ifname)
pd_info.driver.enable(self.pmon, router['ns_name'],
router['gw_interface'],
pd_info.bind_lla)
pd_info.client_started = True
if prefix_update:
LOG.debug("Update server with prefixes: %s", prefix_update)
self.notifier(self.context, prefix_update)
def after_start(self):
LOG.debug('SIGHUP signal handler set')
signal.signal(signal.SIGHUP, self._handle_sighup)
def _handle_sighup(self, signum, frame):
# The external DHCPv6 client uses SIGHUP to notify the agent
# of prefix changes.
self.pd_update_cb()
def _get_sync_data(self):
sync_data = self.pd_dhcp_driver.get_sync_data()
for pd_info in sync_data:
router_id = pd_info.router_id
if not self.routers.get(router_id):
self.routers[router_id] = {'gw_interface': None,
'ns_name': None,
'subnets': {}}
new_pd_info = PDInfo(pd_info=pd_info)
subnets = self.routers[router_id]['subnets']
subnets[pd_info.subnet_id] = new_pd_info
@utils.synchronized("l3-agent-pd")
def remove_router(resource, event, l3_agent, **kwargs):
router_id = kwargs['router'].router_id
router = l3_agent.pd.routers.get(router_id)
l3_agent.pd.delete_router_pd(router)
del l3_agent.pd.routers[router_id]['subnets']
del l3_agent.pd.routers[router_id]
def get_router_entry(ns_name):
return {'gw_interface': None,
'ns_name': ns_name,
'subnets': {}}
@utils.synchronized("l3-agent-pd")
def add_router(resource, event, l3_agent, **kwargs):
added_router = kwargs['router']
router = l3_agent.pd.routers.get(added_router.router_id)
if not router:
l3_agent.pd.routers[added_router.router_id] = (
get_router_entry(added_router.ns_name))
else:
# This will happen during l3 agent restart
router['ns_name'] = added_router.ns_name
class PDInfo(object):
"""A class to simplify storing and passing of information relevant to
Prefix Delegation operations for a given subnet.
"""
def __init__(self, pd_info=None, ri_ifname=None, mac=None):
if pd_info is None:
self.prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
self.old_prefix = l3_constants.PROVISIONAL_IPV6_PD_PREFIX
self.ri_ifname = ri_ifname
self.mac = mac
self.bind_lla = None
self.sync = False
self.driver = None
self.client_started = False
else:
self.prefix = pd_info.prefix
self.old_prefix = None
self.ri_ifname = pd_info.ri_ifname
self.mac = None
self.bind_lla = None
self.sync = True
self.driver = pd_info.driver
self.client_started = pd_info.client_started
def get_bind_lla_with_mask(self):
bind_lla_with_mask = '%s/64' % self.bind_lla
return bind_lla_with_mask
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script fetches and prepares an SDK chroot.
"""
import os
import sys
import urlparse
from chromite.buildbot import constants
from chromite.lib import cgroups
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import locking
from chromite.lib import osutils
from chromite.lib import toolchain
cros_build_lib.STRICT_SUDO = True
COMPRESSION_PREFERENCE = ('xz', 'bz2')
# TODO(zbehan): Remove the dependency on these, reimplement them in python
MAKE_CHROOT = [os.path.join(constants.SOURCE_ROOT,
'src/scripts/sdk_lib/make_chroot.sh')]
ENTER_CHROOT = [os.path.join(constants.SOURCE_ROOT,
'src/scripts/sdk_lib/enter_chroot.sh')]
# We need these tools to run. Very common tools (tar, ...) are omitted.
NEEDED_TOOLS = ('curl', 'xz', 'unshare')
def GetArchStageTarballs(version):
"""Returns the URL for a given arch/version"""
suburl = '%s/coreos-sdk-amd64-%s.tar.bz2' % (version, version)
return [toolchain.GetSdkURL(suburl=suburl)]
def GetStage3Urls(version):
return GetArchStageTarballs(version)
def FetchRemoteTarballs(storage_dir, urls):
"""Fetches a tarball given by url, and place it in sdk/.
Args:
urls: List of URLs to try to download. Download will stop on first success.
Returns:
Full path to the downloaded file
"""
# Note we track content length ourselves since certain versions of curl
# fail if asked to resume a complete file.
# pylint: disable=C0301,W0631
# https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3482927&group_id=976
for url in urls:
# http://www.logilab.org/ticket/8766
# pylint: disable=E1101
parsed = urlparse.urlparse(url)
tarball_name = os.path.basename(parsed.path)
if parsed.scheme in ('', 'file'):
if os.path.exists(parsed.path):
return parsed.path
continue
content_length = 0
print 'Attempting download: %s' % url
result = cros_build_lib.RunCurl(
['-I', url], redirect_stdout=True, redirect_stderr=True,
print_cmd=False)
successful = False
for header in result.output.splitlines():
# We must walk the output to find the 200 code for use cases where
# a proxy is involved and may have pushed down the actual header.
if (header.startswith("HTTP/1.0 200") or
header.startswith("HTTP/1.1 200") or
header.startswith("HTTP/2.0 200") or
header.startswith("HTTP/2 200")):
successful = True
elif header.lower().startswith("content-length:"):
content_length = int(header.split(":", 1)[-1].strip())
if successful:
break
if successful:
break
else:
raise Exception('No valid URLs found!')
tarball_dest = os.path.join(storage_dir, tarball_name)
current_size = 0
if os.path.exists(tarball_dest):
current_size = os.path.getsize(tarball_dest)
if current_size > content_length:
osutils.SafeUnlink(tarball_dest)
current_size = 0
if current_size < content_length:
cros_build_lib.RunCurl(
['-f', '-L', '-y', '30', '-C', '-', '--output', tarball_dest, url],
print_cmd=False)
# Clean up old tarballs now that we've successfully fetched; only clean up
# the tarballs for our prefix, or unknown ones.
ignored_prefix = ('stage3-' if tarball_name.startswith('cros-sdk-')
else 'cros-sdk-')
for filename in os.listdir(storage_dir):
if filename == tarball_name or filename.startswith(ignored_prefix):
continue
print 'Cleaning up old tarball: %s' % (filename,)
osutils.SafeUnlink(os.path.join(storage_dir, filename))
return tarball_dest
def CreateChroot(chroot_path, sdk_tarball, cache_dir,
nousepkg=False, nogetbinpkg=False):
"""Creates a new chroot from a given SDK"""
cmd = MAKE_CHROOT + ['--stage3_path', sdk_tarball,
'--chroot', chroot_path,
'--cache_dir', cache_dir]
if nousepkg:
cmd.append('--nousepkg')
elif nogetbinpkg:
cmd.append('--nogetbinpkg')
try:
cros_build_lib.RunCommand(cmd, print_cmd=False)
except cros_build_lib.RunCommandError:
raise SystemExit('Running %r failed!' % cmd)
def DeleteChroot(chroot_path):
"""Deletes an existing chroot"""
cmd = MAKE_CHROOT + ['--chroot', chroot_path,
'--delete']
try:
cros_build_lib.RunCommand(cmd, print_cmd=False)
except cros_build_lib.RunCommandError:
raise SystemExit('Running %r failed!' % cmd)
def EnterChroot(chroot_path, cache_dir, chrome_root, chrome_root_mount,
additional_args):
"""Enters an existing SDK chroot"""
cmd = ENTER_CHROOT + ['--chroot', chroot_path, '--cache_dir', cache_dir]
if chrome_root:
cmd.extend(['--chrome_root', chrome_root])
if chrome_root_mount:
cmd.extend(['--chrome_root_mount', chrome_root_mount])
if len(additional_args) > 0:
cmd.append('--')
cmd.extend(additional_args)
ret = cros_build_lib.RunCommand(cmd, print_cmd=False, error_code_ok=True)
# If we were in interactive mode, ignore the exit code; it'll be whatever
# the user last ran within the chroot and won't matter to us one way or
# another. Note this does allow chroot entrance to fail and be ignored
# during interactive use; this is however a rare case and the user will
# immediately see it (and won't be checking the exit code manually anyway).
if ret.returncode != 0 and additional_args:
raise SystemExit('Running %r failed with exit code %i'
% (cmd, ret.returncode))
def _SudoCommand():
"""Get the 'sudo' command, along with all needed environment variables."""
# Pass in the ENVIRONMENT_WHITELIST variable so that scripts in the chroot
# know what variables to pass through.
cmd = ['sudo']
for key in constants.CHROOT_ENVIRONMENT_WHITELIST:
value = os.environ.get(key)
if value is not None:
cmd += ['%s=%s' % (key, value)]
# Pass in the path to the depot_tools so that users can access them from
# within the chroot.
gclient = osutils.Which('gclient')
if gclient is not None:
cmd += ['DEPOT_TOOLS=%s' % os.path.realpath(os.path.dirname(gclient))]
return cmd
def _ReExecuteIfNeeded(argv):
"""Re-execute cros_sdk as root.
Also unshare the mount namespace so as to ensure that processes outside
the chroot can't mess with our mounts.
"""
MAGIC_VAR = '%CROS_SDK_MOUNT_NS'
if os.geteuid() != 0:
cmd = _SudoCommand() + ['--'] + argv
os.execvp(cmd[0], cmd)
elif os.environ.get(MAGIC_VAR, '0') == '0':
cgroups.Cgroup.InitSystem()
os.environ[MAGIC_VAR] = '1'
os.execvp('unshare', ['unshare', '-m', '--'] + argv)
else:
os.environ.pop(MAGIC_VAR)
def main(argv):
usage = """usage: %prog [options] [VAR1=val1 .. VARn=valn -- args]
This script is used for manipulating local chroot environments; creating,
deleting, downloading, etc. If given --enter (or no args), it defaults
to an interactive bash shell within the chroot.
If given args, those are passed to the chroot environment and executed."""
conf = cros_build_lib.LoadKeyValueFile(
os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
ignore_missing=True)
sdk_latest_version = conf.get('COREOS_SDK_VERSION', '<unknown>')
parser = commandline.OptionParser(usage=usage, caching=True)
commands = parser.add_option_group("Commands")
commands.add_option(
'--enter', action='store_true', default=False,
help='Enter the SDK chroot. Implies --create.')
commands.add_option(
'--create', action='store_true', default=False,
help='Create the chroot only if it does not already exist. '
'Implies --download.')
commands.add_option(
'--bootstrap', action='store_true', default=False,
help='Build everything from scratch, including the sdk. '
'Use this only if you need to validate a change '
'that affects SDK creation itself (toolchain and '
'build are typically the only folk who need this). '
'Note this will quite heavily slow down the build. '
'This option implies --create --nousepkg.')
commands.add_option(
'-r', '--replace', action='store_true', default=False,
help='Replace an existing SDK chroot. Basically an alias '
'for --delete --create.')
commands.add_option(
'--delete', action='store_true', default=False,
help='Delete the current SDK chroot if it exists.')
commands.add_option(
'--download', action='store_true', default=False,
help='Download the sdk.')
# Global options:
default_chroot = os.path.join(constants.SOURCE_ROOT,
constants.DEFAULT_CHROOT_DIR)
parser.add_option(
'--chroot', dest='chroot', default=default_chroot, type='path',
help=('SDK chroot dir name [%s]' % constants.DEFAULT_CHROOT_DIR))
parser.add_option('--chrome_root', default=None, type='path',
help='Mount this chrome root into the SDK chroot')
parser.add_option('--chrome_root_mount', default=None, type='path',
help='Mount chrome into this path inside SDK chroot')
parser.add_option('--nousepkg', action='store_true', default=False,
help='Do not use binary packages when creating a chroot.')
parser.add_option('--nogetbinpkg', action='store_true', default=False,
help='Do not fetch remote binary packages.')
parser.add_option('-u', '--url',
dest='sdk_url', default=None,
help=('''Use sdk tarball located at this url.
Use file:// for local files.'''))
parser.add_option('--sdk-version', default=sdk_latest_version,
help='Use this sdk version. Current is %default.')
options, chroot_command = parser.parse_args(argv)
# Some sanity checks first, before we ask for sudo credentials.
cros_build_lib.AssertOutsideChroot()
host = os.uname()[4]
if host != 'x86_64':
parser.error(
"cros_sdk is currently only supported on x86_64; you're running"
" %s. Please find a x86_64 machine." % (host,))
missing = osutils.FindMissingBinaries(NEEDED_TOOLS)
if missing:
parser.error((
'The tool(s) %s were not found.\n'
'Please install the appropriate package in your host.\n'
'Example(ubuntu):\n'
' sudo apt-get install <packagename>'
% (', '.join(missing))))
_ReExecuteIfNeeded([sys.argv[0]] + argv)
# Expand out the aliases...
if options.replace:
options.delete = options.create = True
if options.bootstrap:
options.create = True
# If a command is not given, default to enter.
options.enter |= not any(getattr(options, x.dest)
for x in commands.option_list)
options.enter |= bool(chroot_command)
if options.enter and options.delete and not options.create:
parser.error("Trying to enter the chroot when --delete "
"was specified makes no sense.")
# Finally, discern if we need to create the chroot.
chroot_exists = os.path.exists(options.chroot)
if options.create or options.enter:
# Only create if it's being wiped, or if it doesn't exist.
if not options.delete and chroot_exists:
options.create = False
else:
options.download = True
# Finally, flip create if necessary.
if options.enter:
options.create |= not chroot_exists
# Based on selections, fetch the tarball.
if options.sdk_url:
urls = [options.sdk_url]
else:
urls = GetArchStageTarballs(options.sdk_version)
lock_path = os.path.dirname(options.chroot)
lock_path = os.path.join(lock_path,
'.%s_lock' % os.path.basename(options.chroot))
with cgroups.SimpleContainChildren('cros_sdk'):
with locking.FileLock(lock_path, 'chroot lock') as lock:
if options.delete and os.path.exists(options.chroot):
lock.write_lock()
DeleteChroot(options.chroot)
sdk_cache = os.path.join(options.cache_dir, 'sdks')
distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
osutils.SafeMakedirs(options.cache_dir)
for target in (sdk_cache, distfiles_cache):
src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
if not os.path.exists(src):
osutils.SafeMakedirs(target)
continue
lock.write_lock(
"Upgrade to %r needed but chroot is locked; please exit "
"all instances so this upgrade can finish." % src)
if not os.path.exists(src):
# Note that while waiting for the write lock, src may've vanished;
# it's a rare race during the upgrade process that's a byproduct
# of us avoiding taking a write lock to do the src check. If we
# took a write lock for that check, it would effectively limit
# all cros_sdk for a chroot to a single instance.
osutils.SafeMakedirs(target)
elif not os.path.exists(target):
# Upgrade occurred, but a reversion, or something whacky
# occurred writing to the old location. Wipe and continue.
os.rename(src, target)
else:
# Upgrade occurred once already, but either a reversion or
# some before/after separate cros_sdk usage is at play.
# Wipe and continue.
osutils.RmDir(src)
if options.download:
lock.write_lock()
sdk_tarball = FetchRemoteTarballs(sdk_cache, urls)
if options.create:
lock.write_lock()
CreateChroot(options.chroot, sdk_tarball, options.cache_dir,
nousepkg=(options.bootstrap or options.nousepkg),
nogetbinpkg=options.nogetbinpkg)
if options.enter:
lock.read_lock()
EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
options.chrome_root_mount, chroot_command)
|
|
# -*- coding: UTF-8 -*-
import sys
import time
from vulkan import *
from PySide2 import (QtGui, QtCore)
import numpy as np
from PIL import Image
import glm
validationLayers = [
'VK_LAYER_LUNARG_standard_validation'
]
deviceExtensions = [
VK_KHR_SWAPCHAIN_EXTENSION_NAME
]
enableValidationLayers = True
class InstanceProcAddr(object):
T = None
def __init__(self, func):
self.__func = func
def __call__(self, *args, **kwargs):
funcName = self.__func.__name__
func = InstanceProcAddr.procfunc(funcName)
if func:
return func(*args, **kwargs)
else:
return VK_ERROR_EXTENSION_NOT_PRESENT
@staticmethod
def procfunc(funcName):
return vkGetInstanceProcAddr(InstanceProcAddr.T, funcName)
class DeviceProcAddr(InstanceProcAddr):
@staticmethod
def procfunc(funcName):
return vkGetDeviceProcAddr(InstanceProcAddr.T, funcName)
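# Descriptive note on the decorator classes above: each stub function below
# only supplies a Vulkan entry-point name. At call time the decorator resolves
# the real function pointer via vkGetInstanceProcAddr / vkGetDeviceProcAddr,
# using the handle stored in InstanceProcAddr.T, forwards the call, and
# returns VK_ERROR_EXTENSION_NOT_PRESENT if the pointer cannot be resolved.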
# instance ext functions
@InstanceProcAddr
def vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroyDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroySurfaceKHR(instance, surface, pAllocator):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface):
pass
# device ext functions
@DeviceProcAddr
def vkCreateSwapchainKHR(device, pCreateInfo, pAllocator):
pass
@DeviceProcAddr
def vkDestroySwapchainKHR(device, swapchain, pAllocator):
pass
@DeviceProcAddr
def vkGetSwapchainImagesKHR(device, swapchain):
pass
@DeviceProcAddr
def vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence):
pass
@DeviceProcAddr
def vkQueuePresentKHR(queue, pPresentInfo):
pass
def debugCallback(*args):
print('DEBUG: {} {}'.format(args[5], args[6]))
return 0
class Win32misc(object):
@staticmethod
def getInstance(hWnd):
from cffi import FFI as _FFI
_ffi = _FFI()
_ffi.cdef('long __stdcall GetWindowLongA(void* hWnd, int nIndex);')
_lib = _ffi.dlopen('User32.dll')
return _lib.GetWindowLongA(_ffi.cast('void*', hWnd), -6) # GWL_HINSTANCE
class QueueFamilyIndices(object):
def __init__(self):
self.graphicsFamily = -1
self.presentFamily = -1
@property
def isComplete(self):
return self.graphicsFamily >= 0 and self.presentFamily >= 0
class SwapChainSupportDetails(object):
def __init__(self):
self.capabilities = None
self.formats = None
self.presentModes = None
class Vertex(object):
POS = np.array([0, 0], np.float32)
COLOR = np.array([0, 0, 0], np.float32)
TEXCOORD = np.array([0, 0], np.float32)
# def __init__(self):
# self.pos = []
# self.color = []
@staticmethod
def getBindingDescription():
bindingDescription = VkVertexInputBindingDescription(
binding=0,
stride=Vertex.POS.nbytes + Vertex.COLOR.nbytes + Vertex.TEXCOORD.nbytes,
inputRate=VK_VERTEX_INPUT_RATE_VERTEX
)
return bindingDescription
@staticmethod
def getAttributeDescriptions():
pos = VkVertexInputAttributeDescription(
location=0,
binding=0,
format=VK_FORMAT_R32G32_SFLOAT,
offset=0
)
color = VkVertexInputAttributeDescription(
location=1,
binding=0,
format=VK_FORMAT_R32G32B32_SFLOAT,
offset=Vertex.POS.nbytes,
)
texcoord = VkVertexInputAttributeDescription(
location=2,
binding=0,
format=VK_FORMAT_R32G32_SFLOAT,
offset=Vertex.POS.nbytes+Vertex.COLOR.nbytes,
)
return [pos, color, texcoord]
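# Layout note for the descriptions above: vertices are interleaved as
# 2 float32 position + 3 float32 color + 2 float32 texcoord, i.e. a 28-byte
# stride with attribute offsets of 0, 8 and 20 bytes respectively.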
class UniformBufferObject(object):
def __init__(self):
self.model = np.identity(4, np.float32)
self.view = np.identity(4, np.float32)
self.proj = np.identity(4, np.float32)
def toArray(self):
return np.concatenate((self.model, self.view, self.proj))
@property
def nbytes(self):
return self.proj.nbytes + self.view.nbytes + self.model.nbytes
class HelloTriangleApplication(QtGui.QWindow):
def __init__(self):
super(HelloTriangleApplication, self).__init__()
self.setWidth(1280)
self.setHeight(720)
self.setMinimumWidth(40)
self.setMinimumHeight(40)
self.setTitle("Vulkan Python - PySide2")
# self.setSurfaceType(self.OpenGLSurface)
self.__instance = None
self.__callback = None
self.__surface = None
self.__physicalDevice = None
self.__device = None
self.__graphicQueue = None
self.__presentQueue = None
self.__swapChain = None
self.__swapChainImages = []
self.__swapChainImageFormat = None
self.__swapChainExtent = None
self.__swapChainImageViews = []
self.__swapChainFramebuffers = []
self.__renderpass = None
self.__pipeline = None
self.__pipelineLayout = None
self.__commandPool = None
self.__commandBuffers = []
self.__imageAvailableSemaphore = None
self.__renderFinishedSemaphore = None
self.__textureImage = None
self.__textureImageMemory = None
self.__textureImageView = None
self.__textureSampler = None
self.__vertexBuffer = None
self.__vertexBufferMemory = None
self.__indexBuffer = None
self.__indexBufferMemory = None
self.__descriptorPool = None
self.__descriptorSet = None
self.__descriptorSetLayout = None
self.__uniformBuffer = None
self.__uniformBufferMemory = None
self.__vertices = np.array([
# pos color texCoord
-.5, -.5, 1, 0, 0, 1, 0,
.5, -.5, 0, 1, 0, 0, 0,
.5, .5, 0, 0, 1, 0, 1,
-.5, .5, 1, 1, 1, 1, 1
], np.float32)
self.__indices = np.array([0, 1, 2, 2, 3, 0], np.uint16)
self.__ubo = UniformBufferObject()
self.__startTime = time.time()
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.render)
self.initVulkan()
self.timer.start()
def __del__(self):
vkDeviceWaitIdle(self.__device)
if self.__textureSampler:
vkDestroySampler(self.__device, self.__textureSampler, None)
if self.__textureImageView:
vkDestroyImageView(self.__device, self.__textureImageView, None)
if self.__textureImage:
vkDestroyImage(self.__device, self.__textureImage, None)
if self.__textureImageMemory:
vkFreeMemory(self.__device, self.__textureImageMemory, None)
if self.__descriptorPool:
vkDestroyDescriptorPool(self.__device, self.__descriptorPool, None)
if self.__uniformBuffer:
vkDestroyBuffer(self.__device, self.__uniformBuffer, None)
if self.__uniformBufferMemory:
vkFreeMemory(self.__device, self.__uniformBufferMemory, None)
if self.__vertexBuffer:
vkDestroyBuffer(self.__device, self.__vertexBuffer, None)
if self.__vertexBufferMemory:
vkFreeMemory(self.__device, self.__vertexBufferMemory, None)
if self.__indexBuffer:
vkDestroyBuffer(self.__device, self.__indexBuffer, None)
if self.__indexBufferMemory:
vkFreeMemory(self.__device, self.__indexBufferMemory, None)
if self.__imageAvailableSemaphore:
vkDestroySemaphore(self.__device, self.__imageAvailableSemaphore, None)
if self.__renderFinishedSemaphore:
vkDestroySemaphore(self.__device, self.__renderFinishedSemaphore, None)
if self.__descriptorSetLayout:
vkDestroyDescriptorSetLayout(self.__device, self.__descriptorSetLayout, None)
self.__cleanupSwapChain()
if self.__commandPool:
vkDestroyCommandPool(self.__device, self.__commandPool, None)
if self.__device:
vkDestroyDevice(self.__device, None)
if self.__callback:
vkDestroyDebugReportCallbackEXT(self.__instance, self.__callback, None)
if self.__surface:
vkDestroySurfaceKHR(self.__instance, self.__surface, None)
if self.__instance:
vkDestroyInstance(self.__instance, None)
print('instance destroyed')
self.destroy()
def __cleanupSwapChain(self):
[vkDestroyFramebuffer(self.__device, i, None) for i in self.__swapChainFramebuffers]
self.__swapChainFramebuffers = []
vkFreeCommandBuffers(self.__device, self.__commandPool, len(self.__commandBuffers), self.__commandBuffers)
self.__commandBuffers = []
vkDestroyPipeline(self.__device, self.__pipeline, None)
vkDestroyPipelineLayout(self.__device, self.__pipelineLayout, None)
vkDestroyRenderPass(self.__device, self.__renderpass, None)
[vkDestroyImageView(self.__device, i, None) for i in self.__swapChainImageViews]
self.__swapChainImageViews = []
vkDestroySwapchainKHR(self.__device, self.__swapChain, None)
def __recreateSwapChain(self):
vkDeviceWaitIdle(self.__device)
self.__cleanupSwapChain()
self.__createSwapChain()
self.__createImageViews()
self.__createRenderPass()
self.__createGraphicsPipeline()
self.__createFrambuffers()
self.__createCommandBuffers()
def initVulkan(self):
self.__createInstance()
self.__setupDebugCallback()
self.__createSurface()
self.__pickPhysicalDevice()
self.__createLogicalDevice()
self.__createSwapChain()
self.__createImageViews()
self.__createRenderPass()
self.__createDescriptorSetLayout()
self.__createGraphicsPipeline()
self.__createFrambuffers()
self.__createCommandPool()
self.__createTextureImage()
self.__createTextureImageView()
self.__createTextureSampler()
self.__createVertexBuffer()
self.__createIndexBuffer()
self.__createUniformBuffer()
self.__createDescriptorPool()
self.__createDescriptorSet()
self.__createCommandBuffers()
self.__createSemaphores()
def __createInstance(self):
if enableValidationLayers and not self.__checkValidationLayerSupport():
raise Exception("validation layers requested, but not available!")
appInfo = VkApplicationInfo(
# sType=VK_STRUCTURE_TYPE_APPLICATION_INFO,
pApplicationName='Python VK',
applicationVersion=VK_MAKE_VERSION(1, 0, 0),
pEngineName='pyvulkan',
engineVersion=VK_MAKE_VERSION(1, 0, 0),
apiVersion=VK_API_VERSION
)
extensions = self.__getRequiredExtensions()
if enableValidationLayers:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
# enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
# enabledExtensionCount=len(extenstions),
ppEnabledExtensionNames=extensions
)
else:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
enabledLayerCount=0,
# enabledExtensionCount=len(extenstions),
ppEnabledExtensionNames=extensions
)
self.__instance = vkCreateInstance(instanceInfo, None)
InstanceProcAddr.T = self.__instance
def __setupDebugCallback(self):
if not enableValidationLayers:
return
createInfo = VkDebugReportCallbackCreateInfoEXT(
flags=VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT,
pfnCallback=debugCallback
)
self.__callback = vkCreateDebugReportCallbackEXT(self.__instance, createInfo, None)
def __createSurface(self):
if sys.platform == 'win32':
hwnd = self.winId()
hinstance = Win32misc.getInstance(hwnd)
createInfo = VkWin32SurfaceCreateInfoKHR(
hinstance=hinstance,
hwnd=hwnd
)
self.__surface = vkCreateWin32SurfaceKHR(self.__instance, createInfo, None)
# elif sys.platform == 'linux':
# pass
def __pickPhysicalDevice(self):
physicalDevices = vkEnumeratePhysicalDevices(self.__instance)
for device in physicalDevices:
if self.__isDeviceSuitable(device):
self.__physicalDevice = device
break
assert self.__physicalDevice is not None
def __createLogicalDevice(self):
indices = self.__findQueueFamilies(self.__physicalDevice)
uniqueQueueFamilies = {}.fromkeys([indices.graphicsFamily, indices.presentFamily])
queueCreateInfos = []
for i in uniqueQueueFamilies:
queueCreateInfo = VkDeviceQueueCreateInfo(
queueFamilyIndex=i,
queueCount=1,
pQueuePriorities=[1.0]
)
queueCreateInfos.append(queueCreateInfo)
deviceFeatures = VkPhysicalDeviceFeatures()
deviceFeatures.samplerAnisotropy = True
if enableValidationLayers:
createInfo = VkDeviceCreateInfo(
# queueCreateInfoCount=len(queueCreateInfos),
pQueueCreateInfos=queueCreateInfos,
# enabledExtensionCount=len(deviceExtensions),
ppEnabledExtensionNames=deviceExtensions,
# enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
pEnabledFeatures=deviceFeatures
)
else:
createInfo = VkDeviceCreateInfo(
queueCreateInfoCount=1,
pQueueCreateInfos=queueCreateInfo,
# enabledExtensionCount=len(deviceExtensions),
ppEnabledExtensionNames=deviceExtensions,
enabledLayerCount=0,
pEnabledFeatures=deviceFeatures
)
self.__device = vkCreateDevice(self.__physicalDevice, createInfo, None)
DeviceProcAddr.T = self.__device
self.__graphicQueue = vkGetDeviceQueue(self.__device, indices.graphicsFamily, 0)
self.__presentQueue = vkGetDeviceQueue(self.__device, indices.presentFamily, 0)
def __createSwapChain(self):
swapChainSupport = self.__querySwapChainSupport(self.__physicalDevice)
surfaceFormat = self.__chooseSwapSurfaceFormat(swapChainSupport.formats)
presentMode = self.__chooseSwapPresentMode(swapChainSupport.presentModes)
extent = self.__chooseSwapExtent(swapChainSupport.capabilities)
imageCount = swapChainSupport.capabilities.minImageCount + 1
if swapChainSupport.capabilities.maxImageCount > 0 and imageCount > swapChainSupport.capabilities.maxImageCount:
imageCount = swapChainSupport.capabilities.maxImageCount
indices = self.__findQueueFamilies(self.__physicalDevice)
queueFamily = {}.fromkeys([indices.graphicsFamily, indices.presentFamily])
queueFamilies = list(queueFamily.keys())
if len(queueFamilies) > 1:
createInfo = VkSwapchainCreateInfoKHR(
surface=self.__surface,
minImageCount=imageCount,
imageFormat=surfaceFormat.format,
imageColorSpace=surfaceFormat.colorSpace,
imageExtent=extent,
imageArrayLayers=1,
imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
# queueFamilyIndexCount=len(queueFamilies),
pQueueFamilyIndices=queueFamilies,
imageSharingMode=VK_SHARING_MODE_CONCURRENT,
preTransform=swapChainSupport.capabilities.currentTransform,
compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
presentMode=presentMode,
clipped=True
)
else:
createInfo = VkSwapchainCreateInfoKHR(
surface=self.__surface,
minImageCount=imageCount,
imageFormat=surfaceFormat.format,
imageColorSpace=surfaceFormat.colorSpace,
imageExtent=extent,
imageArrayLayers=1,
imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
# queueFamilyIndexCount=len(queueFamilies),
pQueueFamilyIndices=queueFamilies,
imageSharingMode=VK_SHARING_MODE_EXCLUSIVE,
preTransform=swapChainSupport.capabilities.currentTransform,
compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
presentMode=presentMode,
clipped=True
)
self.__swapChain = vkCreateSwapchainKHR(self.__device, createInfo, None)
assert self.__swapChain is not None
self.__swapChainImages = vkGetSwapchainImagesKHR(self.__device, self.__swapChain)
self.__swapChainImageFormat = surfaceFormat.format
self.__swapChainExtent = extent
def __createImageViews(self):
self.__swapChainImageViews = []
for i, image in enumerate(self.__swapChainImages):
self.__swapChainImageViews.append(self.__createImageView(image, self.__swapChainImageFormat))
def __createRenderPass(self):
colorAttachment = VkAttachmentDescription(
format=self.__swapChainImageFormat,
samples=VK_SAMPLE_COUNT_1_BIT,
loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR,
storeOp=VK_ATTACHMENT_STORE_OP_STORE,
stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE,
stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE,
initialLayout=VK_IMAGE_LAYOUT_UNDEFINED,
finalLayout=VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
)
colorAttachmentRef = VkAttachmentReference(
0,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
)
subpass = VkSubpassDescription(
pipelineBindPoint=VK_PIPELINE_BIND_POINT_GRAPHICS,
pColorAttachments=[colorAttachmentRef]
)
renderPassInfo = VkRenderPassCreateInfo(
pAttachments=[colorAttachment],
pSubpasses=[subpass]
)
self.__renderpass = vkCreateRenderPass(self.__device, renderPassInfo, None)
def __createDescriptorSetLayout(self):
uboLayoutBinding = VkDescriptorSetLayoutBinding(
binding=0,
descriptorType=VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
descriptorCount=1,
stageFlags=VK_SHADER_STAGE_VERTEX_BIT
)
samplerLayoutBinding = VkDescriptorSetLayoutBinding(
binding=1,
descriptorType=VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
descriptorCount=1,
stageFlags=VK_SHADER_STAGE_FRAGMENT_BIT
)
layoutInfo = VkDescriptorSetLayoutCreateInfo(
pBindings=[uboLayoutBinding, samplerLayoutBinding]
)
self.__descriptorSetLayout = vkCreateDescriptorSetLayout(self.__device, layoutInfo, None)
def __createGraphicsPipeline(self):
vertexShaderMode = self.__createShaderModule('shader/vert.spv')
fragmentShaderMode = self.__createShaderModule('shader/frag.spv')
vertexShaderStageInfo = VkPipelineShaderStageCreateInfo(
stage=VK_SHADER_STAGE_VERTEX_BIT,
module=vertexShaderMode,
pName='main'
)
fragmentShaderStageInfo = VkPipelineShaderStageCreateInfo(
stage=VK_SHADER_STAGE_FRAGMENT_BIT,
module=fragmentShaderMode,
pName='main'
)
shaderStageInfos = [vertexShaderStageInfo, fragmentShaderStageInfo]
bindingDescription = Vertex.getBindingDescription()
attributeDescription = Vertex.getAttributeDescriptions()
vertexInputInfo = VkPipelineVertexInputStateCreateInfo(
# vertexBindingDescriptionCount=0,
pVertexBindingDescriptions=[bindingDescription],
# vertexAttributeDescriptionCount=0,
pVertexAttributeDescriptions=attributeDescription,
)
inputAssembly = VkPipelineInputAssemblyStateCreateInfo(
topology=VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
primitiveRestartEnable=False
)
viewport = VkViewport(0.0, 0.0,
float(self.__swapChainExtent.width),
float(self.__swapChainExtent.height),
0.0, 1.0)
scissor = VkRect2D([0, 0], self.__swapChainExtent)
viewportStage = VkPipelineViewportStateCreateInfo(
viewportCount=1,
pViewports=viewport,
scissorCount=1,
pScissors=scissor
)
rasterizer = VkPipelineRasterizationStateCreateInfo(
depthClampEnable=False,
rasterizerDiscardEnable=False,
polygonMode=VK_POLYGON_MODE_FILL,
lineWidth=1.0,
cullMode=VK_CULL_MODE_BACK_BIT,
frontFace=VK_FRONT_FACE_CLOCKWISE,
depthBiasEnable=False
)
multisampling = VkPipelineMultisampleStateCreateInfo(
sampleShadingEnable=False,
rasterizationSamples=VK_SAMPLE_COUNT_1_BIT
)
colorBlendAttachment = VkPipelineColorBlendAttachmentState(
colorWriteMask=VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
blendEnable=False
)
colorBending = VkPipelineColorBlendStateCreateInfo(
logicOpEnable=False,
logicOp=VK_LOGIC_OP_COPY,
attachmentCount=1,
pAttachments=colorBlendAttachment,
blendConstants=[0.0, 0.0, 0.0, 0.0]
)
pipelineLayoutInfo = VkPipelineLayoutCreateInfo(
# setLayoutCount=0,
pushConstantRangeCount=0,
pSetLayouts=[self.__descriptorSetLayout]
)
self.__pipelineLayout = vkCreatePipelineLayout(self.__device, pipelineLayoutInfo, None)
pipelineInfo = VkGraphicsPipelineCreateInfo(
# stageCount=len(shaderStageInfos),
pStages=shaderStageInfos,
pVertexInputState=vertexInputInfo,
pInputAssemblyState=inputAssembly,
pViewportState=viewportStage,
pRasterizationState=rasterizer,
pMultisampleState=multisampling,
pColorBlendState=colorBending,
layout=self.__pipelineLayout,
renderPass=self.__renderpass,
subpass=0,
basePipelineHandle=VK_NULL_HANDLE
)
self.__pipeline = vkCreateGraphicsPipelines(self.__device, VK_NULL_HANDLE, 1, pipelineInfo, None)#[0]
vkDestroyShaderModule(self.__device, vertexShaderMode, None)
vkDestroyShaderModule(self.__device, fragmentShaderMode, None)
def __createFrambuffers(self):
self.__swapChainFramebuffers = []
for i, iv in enumerate(self.__swapChainImageViews):
framebufferInfo = VkFramebufferCreateInfo(
renderPass=self.__renderpass,
pAttachments=[iv],
width=self.__swapChainExtent.width,
height=self.__swapChainExtent.height,
layers=1
)
self.__swapChainFramebuffers.append(vkCreateFramebuffer(self.__device, framebufferInfo, None))
def __createCommandPool(self):
queueFamilyIndices = self.__findQueueFamilies(self.__physicalDevice)
createInfo = VkCommandPoolCreateInfo(
queueFamilyIndex=queueFamilyIndices.graphicsFamily
)
self.__commandPool = vkCreateCommandPool(self.__device, createInfo, None)
def __createTextureImage(self):
_image = Image.open('textures/texture.jpg')
_image.putalpha(1)
width = _image.width
height = _image.height
imageSize = width * height * 4
stagingBuffer, stagingMem = self.__createBuffer(imageSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
data = vkMapMemory(self.__device, stagingMem, 0, imageSize, 0)
ffi.memmove(data, _image.tobytes(), imageSize)
vkUnmapMemory(self.__device, stagingMem)
del _image
self.__textureImage, self.__textureImageMemory = self.__createImage(width, height,
VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
self.__transitionImageLayout(self.__textureImage, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
self.__copyBufferToImage(stagingBuffer, self.__textureImage, width, height)
self.__transitionImageLayout(self.__textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
vkDestroyBuffer(self.__device, stagingBuffer, None)
vkFreeMemory(self.__device, stagingMem, None)
def __createTextureImageView(self):
self.__textureImageView = self.__createImageView(self.__textureImage, VK_FORMAT_R8G8B8A8_UNORM)
def __createTextureSampler(self):
samplerInfo = VkSamplerCreateInfo(
magFilter=VK_FILTER_LINEAR,
minFilter=VK_FILTER_LINEAR,
addressModeU=VK_SAMPLER_ADDRESS_MODE_REPEAT,
addressModeV=VK_SAMPLER_ADDRESS_MODE_REPEAT,
addressModeW=VK_SAMPLER_ADDRESS_MODE_REPEAT,
anisotropyEnable=True,
maxAnisotropy=16,
compareEnable=False,
compareOp=VK_COMPARE_OP_ALWAYS,
borderColor=VK_BORDER_COLOR_INT_OPAQUE_BLACK,
unnormalizedCoordinates=False
)
self.__textureSampler = vkCreateSampler(self.__device, samplerInfo, None)
def __createImageView(self, image, imFormat):
ssr = VkImageSubresourceRange(
VK_IMAGE_ASPECT_COLOR_BIT,
0, 1, 0, 1
)
viewInfo = VkImageViewCreateInfo(
image=image,
viewType=VK_IMAGE_VIEW_TYPE_2D,
format=imFormat,
subresourceRange=ssr
)
return vkCreateImageView(self.__device, viewInfo, None)
    def __createImage(self, width, height, imFormat, tiling, usage, properties):
imageInfo = VkImageCreateInfo(
imageType=VK_IMAGE_TYPE_2D,
            extent=[width, height, 1],
mipLevels=1,
arrayLayers=1,
format=imFormat,
samples=VK_SAMPLE_COUNT_1_BIT,
tiling=tiling,
usage=usage,
sharingMode=VK_SHARING_MODE_EXCLUSIVE,
initialLayout=VK_IMAGE_LAYOUT_UNDEFINED
)
image = vkCreateImage(self.__device, imageInfo, None)
        memRequirements = vkGetImageMemoryRequirements(self.__device, image)
        allocInfo = VkMemoryAllocateInfo(
            allocationSize=memRequirements.size,
            memoryTypeIndex=self.__findMemoryType(memRequirements.memoryTypeBits, properties)
)
imageMemory = vkAllocateMemory(self.__device, allocInfo, None)
vkBindImageMemory(self.__device, image, imageMemory, 0)
return (image, imageMemory)
def __transitionImageLayout(self, image, oldLayout, newLayout):
cmdBuffer = self.__beginSingleTimeCommands()
subresourceRange = VkImageSubresourceRange(
aspectMask=VK_IMAGE_ASPECT_COLOR_BIT,
baseMipLevel=0,
levelCount=1,
baseArrayLayer=0,
layerCount=1
)
barrier = VkImageMemoryBarrier(
oldLayout=oldLayout,
newLayout=newLayout,
srcQueueFamilyIndex=VK_QUEUE_FAMILY_IGNORED,
dstQueueFamilyIndex=VK_QUEUE_FAMILY_IGNORED,
image=image,
subresourceRange=subresourceRange
)
sourceStage = 0
destinationStage = 0
if oldLayout == VK_IMAGE_LAYOUT_UNDEFINED and newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.srcAccessMask = 0
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT
sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT
elif oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL and newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT
sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT
destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
else:
raise Exception('unsupported layout transition!')
vkCmdPipelineBarrier(cmdBuffer,
sourceStage,
destinationStage,
0,
0, None,
0, None,
1, barrier)
self.__endSingleTimeCommands(cmdBuffer)
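    # Summary of the two transitions handled above (no new behaviour): the texture
    # staging upload in __createTextureImage drives both of them:
    #   UNDEFINED            -> TRANSFER_DST_OPTIMAL      before vkCmdCopyBufferToImage
    #   TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL  before the fragment shader samples it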
def __copyBufferToImage(self, buffer, image, width, height):
cmdbuffer = self.__beginSingleTimeCommands()
subresource = VkImageSubresourceLayers(
aspectMask=VK_IMAGE_ASPECT_COLOR_BIT,
mipLevel=0,
baseArrayLayer=0,
layerCount=1
)
region = VkBufferImageCopy(
bufferOffset=0,
bufferRowLength=0,
bufferImageHeight=0,
imageSubresource=subresource,
imageOffset=[0, 0],
imageExtent=[width, height, 1]
)
vkCmdCopyBufferToImage(cmdbuffer, buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, region)
self.__endSingleTimeCommands(cmdbuffer)
def __createVertexBuffer(self):
bufferSize = self.__vertices.nbytes
stagingBuffer, stagingMemory = self.__createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
data = vkMapMemory(self.__device, stagingMemory, 0, bufferSize, 0)
        vertexPtr = ffi.cast('float *', self.__vertices.ctypes.data)
        ffi.memmove(data, vertexPtr, bufferSize)
vkUnmapMemory(self.__device, stagingMemory)
self.__vertexBuffer, self.__vertexBufferMemory = self.__createBuffer(bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
self.__copyBuffer(stagingBuffer, self.__vertexBuffer, bufferSize)
vkDestroyBuffer(self.__device, stagingBuffer, None)
vkFreeMemory(self.__device, stagingMemory, None)
def __createIndexBuffer(self):
bufferSize = self.__indices.nbytes
stagingBuffer, stagingMemory = self.__createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
data = vkMapMemory(self.__device, stagingMemory, 0, bufferSize, 0)
indicesPtr = ffi.cast('uint16_t*', self.__indices.ctypes.data)
ffi.memmove(data, indicesPtr, bufferSize)
vkUnmapMemory(self.__device, stagingMemory)
self.__indexBuffer, self.__indexBufferMemory = self.__createBuffer(bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
self.__copyBuffer(stagingBuffer, self.__indexBuffer, bufferSize)
vkDestroyBuffer(self.__device, stagingBuffer, None)
vkFreeMemory(self.__device, stagingMemory, None)
def __createUniformBuffer(self):
self.__uniformBuffer, self.__uniformBufferMemory = self.__createBuffer(self.__ubo.nbytes,
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
def __createDescriptorPool(self):
poolSize1 = VkDescriptorPoolSize(
type=VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
descriptorCount=1
)
poolSize2 = VkDescriptorPoolSize(
type=VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
descriptorCount=1
)
poolInfo = VkDescriptorPoolCreateInfo(
pPoolSizes=[poolSize1, poolSize2],
maxSets=1
)
self.__descriptorPool = vkCreateDescriptorPool(self.__device, poolInfo, None)
def __createDescriptorSet(self):
layouts = [self.__descriptorSetLayout]
allocInfo = VkDescriptorSetAllocateInfo(
descriptorPool=self.__descriptorPool,
pSetLayouts=layouts
)
self.__descriptorSet = vkAllocateDescriptorSets(self.__device, allocInfo)
bufferInfo = VkDescriptorBufferInfo(
buffer=self.__uniformBuffer,
offset=0,
range=self.__ubo.nbytes
)
imageInfo = VkDescriptorImageInfo(
imageLayout=VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
imageView=self.__textureImageView,
sampler=self.__textureSampler
)
descriptWrite1 = VkWriteDescriptorSet(
dstSet=self.__descriptorSet[0],
dstBinding=0,
dstArrayElement=0,
descriptorType=VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
# descriptorCount=1,
pBufferInfo=[bufferInfo]
)
descriptWrite2 = VkWriteDescriptorSet(
dstSet=self.__descriptorSet[0],
dstBinding=1,
dstArrayElement=0,
descriptorType=VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
pImageInfo=[imageInfo]
)
vkUpdateDescriptorSets(self.__device, 2, [descriptWrite1, descriptWrite2], 0, None)
def __createBuffer(self, size, usage, properties):
buffer = None
bufferMemory = None
bufferInfo = VkBufferCreateInfo(
size=size,
usage=usage,
sharingMode=VK_SHARING_MODE_EXCLUSIVE
)
buffer = vkCreateBuffer(self.__device, bufferInfo, None)
memRequirements = vkGetBufferMemoryRequirements(self.__device, buffer)
allocInfo = VkMemoryAllocateInfo(
allocationSize=memRequirements.size,
memoryTypeIndex=self.__findMemoryType(memRequirements.memoryTypeBits, properties)
)
bufferMemory = vkAllocateMemory(self.__device, allocInfo, None)
vkBindBufferMemory(self.__device, buffer, bufferMemory, 0)
return (buffer, bufferMemory)
def __beginSingleTimeCommands(self):
allocInfo = VkCommandBufferAllocateInfo(
level=VK_COMMAND_BUFFER_LEVEL_PRIMARY,
commandPool=self.__commandPool,
commandBufferCount=1
)
commandBuffer = vkAllocateCommandBuffers(self.__device, allocInfo)[0]
beginInfo = VkCommandBufferBeginInfo(flags=VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT)
vkBeginCommandBuffer(commandBuffer, beginInfo)
return commandBuffer
def __endSingleTimeCommands(self, commandBuffer):
vkEndCommandBuffer(commandBuffer)
submitInfo = VkSubmitInfo(pCommandBuffers=[commandBuffer])
vkQueueSubmit(self.__graphicQueue, 1, [submitInfo], VK_NULL_HANDLE)
vkQueueWaitIdle(self.__graphicQueue)
vkFreeCommandBuffers(self.__device, self.__commandPool, 1, [commandBuffer])
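    # __beginSingleTimeCommands / __endSingleTimeCommands implement the usual
    # "one-shot command buffer" pattern: allocate a primary buffer from the command
    # pool, record it with ONE_TIME_SUBMIT, submit on the graphics queue, wait for
    # the queue to go idle, then free the buffer. The staging copies and layout
    # transitions in this class reuse this helper pair.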
def __copyBuffer(self, src, dst, bufferSize):
commandBuffer = self.__beginSingleTimeCommands()
# copyRegion = VkBufferCopy(size=bufferSize)
copyRegion = VkBufferCopy(0, 0, bufferSize)
vkCmdCopyBuffer(commandBuffer, src, dst, 1, [copyRegion])
self.__endSingleTimeCommands(commandBuffer)
def __findMemoryType(self, typeFilter, properties):
memProperties = vkGetPhysicalDeviceMemoryProperties(self.__physicalDevice)
for i, prop in enumerate(memProperties.memoryTypes):
if (typeFilter & (1 << i)) and ((prop.propertyFlags & properties) == properties):
return i
return -1
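    # How the lookup above works: typeFilter is a bitmask in which bit i means
    # "memory type i is acceptable for this resource". A minimal, hypothetical
    # illustration of the bit test:
    #
    #     typeFilter = 0b0110                                    # types 1 and 2 acceptable
    #     acceptable = [i for i in range(4) if typeFilter & (1 << i)]
    #     assert acceptable == [1, 2]
    #
    # The propertyFlags check then keeps only the types that also expose the
    # requested properties (e.g. HOST_VISIBLE | HOST_COHERENT for staging memory).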
def __createCommandBuffers(self):
self.__commandBuffers = []
allocInfo = VkCommandBufferAllocateInfo(
commandPool=self.__commandPool,
level=VK_COMMAND_BUFFER_LEVEL_PRIMARY,
commandBufferCount=len(self.__swapChainFramebuffers)
)
self.__commandBuffers = vkAllocateCommandBuffers(self.__device, allocInfo)
for i, buffer in enumerate(self.__commandBuffers):
beginInfo = VkCommandBufferBeginInfo(flags=VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)
vkBeginCommandBuffer(buffer, beginInfo)
renderArea = VkRect2D([0, 0], self.__swapChainExtent)
clearColor = VkClearValue(color=[[0.0, 0.0, 0.0, 1.0]])
renderPassInfo = VkRenderPassBeginInfo(
renderPass=self.__renderpass,
framebuffer=self.__swapChainFramebuffers[i],
renderArea=renderArea,
pClearValues=[clearColor]
)
vkCmdBeginRenderPass(buffer, renderPassInfo, VK_SUBPASS_CONTENTS_INLINE)
vkCmdBindPipeline(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.__pipeline)
vkCmdBindVertexBuffers(buffer, 0, 1, [self.__vertexBuffer], [0])
vkCmdBindIndexBuffer(buffer, self.__indexBuffer, 0, VK_INDEX_TYPE_UINT16)
vkCmdBindDescriptorSets(buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, self.__pipelineLayout, 0, 1, self.__descriptorSet, 0, None)
vkCmdDrawIndexed(buffer, len(self.__indices), 1, 0, 0, 0)
vkCmdEndRenderPass(buffer)
vkEndCommandBuffer(buffer)
def __createSemaphores(self):
semaphoreInfo = VkSemaphoreCreateInfo()
self.__imageAvailableSemaphore = vkCreateSemaphore(self.__device, semaphoreInfo, None)
self.__renderFinishedSemaphore = vkCreateSemaphore(self.__device, semaphoreInfo, None)
def __updateUniformBuffer(self):
currentTime = time.time()
t = currentTime - self.__startTime
self.__ubo.model = glm.rotate(np.identity(4, np.float32), 90.0 * t, 0.0, 0.0, 1.0)
self.__ubo.view = glm.lookAt(np.array([2, 2, 2], np.float32), np.array([0, 0, 0], np.float32), np.array([0, 0, 1], np.float32))
self.__ubo.proj = glm.perspective(-45.0, float(self.__swapChainExtent.width) / self.__swapChainExtent.height, 0.1, 10.0)
# self.__ubo.proj[1][1] *= -1
data = vkMapMemory(self.__device, self.__uniformBufferMemory, 0, self.__ubo.nbytes, 0)
ma = self.__ubo.toArray()
dptr = ffi.cast('float *', ma.ctypes.data)
ffi.memmove(data, dptr, self.__ubo.nbytes)
vkUnmapMemory(self.__device, self.__uniformBufferMemory)
def drawFrame(self):
if not self.isExposed():
return
try:
imageIndex = vkAcquireNextImageKHR(self.__device, self.__swapChain, 18446744073709551615,
self.__imageAvailableSemaphore, VK_NULL_HANDLE)
except VkErrorSurfaceLostKhr:
self.__recreateSwapChain()
return
# else:
# raise Exception('faild to acquire next image.')
waitSemaphores = [self.__imageAvailableSemaphore]
signalSemaphores = [self.__renderFinishedSemaphore]
waitStages = [VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT]
submit = VkSubmitInfo(
pWaitSemaphores=waitSemaphores,
pWaitDstStageMask=waitStages,
pCommandBuffers=[self.__commandBuffers[imageIndex]],
pSignalSemaphores=signalSemaphores
)
vkQueueSubmit(self.__graphicQueue, 1, submit, VK_NULL_HANDLE)
        presentInfo = VkPresentInfoKHR(
pWaitSemaphores=signalSemaphores,
pSwapchains=[self.__swapChain],
pImageIndices=[imageIndex]
)
try:
            vkQueuePresentKHR(self.__presentQueue, presentInfo)
except VkErrorOutOfDateKhr:
self.__recreateSwapChain()
if enableValidationLayers:
vkQueueWaitIdle(self.__presentQueue)
def __createShaderModule(self, shaderFile):
with open(shaderFile, 'rb') as sf:
code = sf.read()
createInfo = VkShaderModuleCreateInfo(
codeSize=len(code),
pCode=code
)
return vkCreateShaderModule(self.__device, createInfo, None)
def __chooseSwapSurfaceFormat(self, formats):
if len(formats) == 1 and formats[0].format == VK_FORMAT_UNDEFINED:
return [VK_FORMAT_B8G8R8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR]
for i in formats:
if i.format == VK_FORMAT_B8G8R8_UNORM and i.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR:
return i
return formats[0]
def __chooseSwapPresentMode(self, presentModes):
bestMode = VK_PRESENT_MODE_FIFO_KHR
for i in presentModes:
if i == VK_PRESENT_MODE_FIFO_KHR:
return i
elif i == VK_PRESENT_MODE_MAILBOX_KHR:
return i
elif i == VK_PRESENT_MODE_IMMEDIATE_KHR:
return i
return bestMode
def __chooseSwapExtent(self, capabilities):
width = max(capabilities.minImageExtent.width, min(capabilities.maxImageExtent.width, self.width()))
height = max(capabilities.minImageExtent.height, min(capabilities.maxImageExtent.height, self.height()))
return VkExtent2D(width, height)
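    # The extent is just the window size clamped to the range the surface reports.
    # With made-up numbers: minImageExtent.width=1, maxImageExtent.width=4096 and a
    # window width of 800 gives max(1, min(4096, 800)) == 800.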
def __querySwapChainSupport(self, device):
detail = SwapChainSupportDetails()
detail.capabilities = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, self.__surface)
detail.formats = vkGetPhysicalDeviceSurfaceFormatsKHR(device, self.__surface)
detail.presentModes = vkGetPhysicalDeviceSurfacePresentModesKHR(device, self.__surface)
return detail
def __isDeviceSuitable(self, device):
indices = self.__findQueueFamilies(device)
extensionsSupported = self.__checkDeviceExtensionSupport(device)
swapChainAdequate = False
if extensionsSupported:
swapChainSupport = self.__querySwapChainSupport(device)
swapChainAdequate = (swapChainSupport.formats is not None) and (swapChainSupport.presentModes is not None)
supportedFeatures = vkGetPhysicalDeviceFeatures(device)
return indices.isComplete and extensionsSupported and swapChainAdequate and supportedFeatures.samplerAnisotropy
def __checkDeviceExtensionSupport(self, device):
availableExtensions = vkEnumerateDeviceExtensionProperties(device, None)
aen = [i.extensionName for i in availableExtensions]
for i in deviceExtensions:
if i not in aen:
return False
return True
def __findQueueFamilies(self, device):
indices = QueueFamilyIndices()
familyProperties = vkGetPhysicalDeviceQueueFamilyProperties(device)
for i, prop in enumerate(familyProperties):
if prop.queueCount > 0 and prop.queueFlags & VK_QUEUE_GRAPHICS_BIT:
indices.graphicsFamily = i
presentSupport = vkGetPhysicalDeviceSurfaceSupportKHR(device, i, self.__surface)
if prop.queueCount > 0 and presentSupport:
indices.presentFamily = i
if indices.isComplete:
break
return indices
def __getRequiredExtensions(self):
        extensions = [e.extensionName for e in vkEnumerateInstanceExtensionProperties(None)]
        if enableValidationLayers:
            extensions.append(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)
        return extensions
def __checkValidationLayerSupport(self):
availableLayers = vkEnumerateInstanceLayerProperties()
for layer in validationLayers:
layerfound = False
for layerProp in availableLayers:
if layer == layerProp.layerName:
layerfound = True
break
            if not layerfound:
                return False
        return True
def render(self):
self.__updateUniformBuffer()
self.drawFrame()
def resizeEvent(self, event):
if event.size() != event.oldSize():
self.__recreateSwapChain()
super(HelloTriangleApplication, self).resizeEvent(event)
if __name__ == '__main__':
import sys
app = QtGui.QGuiApplication(sys.argv)
win = HelloTriangleApplication()
win.show()
    def cleanup():
        global win
        win.timer.stop()
        del win
    app.aboutToQuit.connect(cleanup)
sys.exit(app.exec_())
|
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo.config import cfg
import webob
import webob.exc
import nova.api.auth
from nova.openstack.common.gettextutils import _
from nova import test
CONF = cfg.CONF
class TestNovaKeystoneContextMiddleware(test.NoDBTestCase):
def setUp(self):
super(TestNovaKeystoneContextMiddleware, self).setUp()
@webob.dec.wsgify()
def fake_app(req):
self.context = req.environ['nova.context']
return webob.Response()
self.context = None
self.middleware = nova.api.auth.NovaKeystoneContext(fake_app)
self.request = webob.Request.blank('/')
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
self.request.headers['X_SERVICE_CATALOG'] = json.dumps({})
def test_no_user_or_user_id(self):
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '401 Unauthorized')
    def test_user_id_only(self):
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
    def test_user_only(self):
self.request.headers['X_USER'] = 'testuser'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuser')
def test_user_id_trumps_user(self):
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_USER'] = 'testuser'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
def test_invalid_service_catalog(self):
self.request.headers['X_USER'] = 'testuser'
self.request.headers['X_SERVICE_CATALOG'] = "bad json"
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '500 Internal Server Error')
class TestKeystoneMiddlewareRoles(test.NoDBTestCase):
def setUp(self):
super(TestKeystoneMiddlewareRoles, self).setUp()
@webob.dec.wsgify()
def role_check_app(req):
context = req.environ['nova.context']
if "knight" in context.roles and "bad" not in context.roles:
return webob.Response(status="200 Role Match")
elif context.roles == ['']:
return webob.Response(status="200 No Roles")
else:
raise webob.exc.HTTPBadRequest(_("unexpected role header"))
self.middleware = nova.api.auth.NovaKeystoneContext(role_check_app)
self.request = webob.Request.blank('/')
self.request.headers['X_USER'] = 'testuser'
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
self.request.headers['X_SERVICE_CATALOG'] = json.dumps({})
self.roles = "pawn, knight, rook"
def test_roles(self):
# Test that the newer style role header takes precedence.
self.request.headers['X_ROLES'] = 'pawn,knight,rook'
self.request.headers['X_ROLE'] = 'bad'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 Role Match')
def test_roles_empty(self):
self.request.headers['X_ROLES'] = ''
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
def test_deprecated_role(self):
# Test fallback to older role header.
self.request.headers['X_ROLE'] = 'pawn,knight,rook'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 Role Match')
def test_role_empty(self):
self.request.headers['X_ROLE'] = ''
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
def test_no_role_headers(self):
# Test with no role headers set.
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
class TestPipeLineFactory(test.NoDBTestCase):
class FakeFilter(object):
def __init__(self, name):
self.name = name
self.obj = None
def __call__(self, obj):
self.obj = obj
return self
class FakeApp(object):
def __init__(self, name):
self.name = name
class FakeLoader():
def get_filter(self, name):
return TestPipeLineFactory.FakeFilter(name)
def get_app(self, name):
return TestPipeLineFactory.FakeApp(name)
def _test_pipeline(self, pipeline, app):
for p in pipeline.split()[:-1]:
self.assertEqual(app.name, p)
self.assertIsInstance(app, TestPipeLineFactory.FakeFilter)
app = app.obj
self.assertEqual(app.name, pipeline.split()[-1])
self.assertIsInstance(app, TestPipeLineFactory.FakeApp)
def test_pipeline_factory(self):
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
def test_pipeline_factory_v3(self):
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory_v3(
TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_with_rate_limits(self):
CONF.set_override('api_rate_limit', True)
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_without_rate_limits(self):
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline1 = 'test1 test2 test3'
fake_pipeline2 = 'test4 test5 test6'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None,
keystone_nolimit=fake_pipeline1,
keystone=fake_pipeline2)
self._test_pipeline(fake_pipeline1, app)
    def test_pipeline_factory_missing_nolimits_pipeline(self):
CONF.set_override('api_rate_limit', False)
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline = 'test1 test2 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_compatibility_with_v3(self):
CONF.set_override('api_rate_limit', True)
CONF.set_override('auth_strategy', 'keystone')
fake_pipeline = 'test1 ratelimit_v3 test3'
app = nova.api.auth.pipeline_factory(
TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
self._test_pipeline('test1 test3', app)
|
|
import os
import time
from datetime import datetime
import posixpath
from dotenv import set_key
from invoke import task
from setuptools_scm import get_version
from .base import LOCAL_PREFIX, REMOTE_PREFIX, do, env
from .notify import send_alert
from .wrap import compose, docker, git, python, s3cmd
@task
def test(ctx, cmd='uname -a', path='.'):
do(ctx, cmd=cmd, path=path, env={'foo': 'bar'})
# TODO: See what invoke did in their release task that requires a specific branch
@task
def release_code(ctx, project_name=None, version=None, upload=True, push=False, static=True, build=True):
"""Tag, build and optionally push and upload new project release
"""
# TODO set project name in ctx
project_name = project_name or os.path.basename(os.getcwd()).replace('-', '_')
scm_version = get_version()
version = version or '.'.join(scm_version.split('.')[:3])
if build:
print(f'Git version: {scm_version}')
if len(scm_version.split('.')) > 4:
print('First commit all changes, then run this task again')
return False
if scm_version != version:
git(ctx, f'tag v{version}')
# Clean and build
do(ctx, cmd='rm -rf build/')
python(ctx, cmd='setup.py bdist_wheel', conda_env=True)
if push:
git(ctx, f'push origin v{version}')
if upload:
s3cmd(ctx,
simple_path=f'dist/{project_name}-{version}-py3-none-any.whl', direction='up', project_name=project_name)
if static:
excludes = '--exclude=' + ' --exclude='.join(['"*.less"', '"*.md"', '"ckeditor/"'])
try:
do(ctx, f'webpack', path='src/assets/')
except Exception:
pass
python(ctx, f'./src/manage.py collectstatic --no-input -v0', conda_env=True)
# do(ctx, f'rm -rf .local/static/ckeditor/')
do(ctx, f'tar -zcvf .local/static_v{version}.tar.gz {excludes} -C .local/static/ .')
if upload:
s3cmd(ctx, local_path=f'.local/static_v{version}.tar.gz', s3_path=f'{project_name}/static/')
@task
def deploy_code(ctx, version, download=False, build=True, static=False, migrate=False, project=None, bucket=None):
project = project or ctx['project_name']
bucket = bucket or ctx['bucket_name']
# TODO: make configurable
stack_path = 'stack/django/'
local_path = '.local/'
if getattr(ctx, 'host', False):
path = posixpath
else:
path = os.path
stack_path = path.abspath(path.join(ctx.dir, stack_path))
local_path = path.abspath(path.join(ctx.dir, local_path))
# Update the env files
do(ctx, f'sed -i.bak "s/^VERSION=.*/VERSION={version}/g" {path.join(ctx.dir, ".env")}')
do(ctx, f'sed -i.bak "s/^VERSION=.*/VERSION={version}/g" {path.join(stack_path, ".env")}')
do(ctx, f'sed -i.bak "s/^PACKAGE_NAME=.*/PACKAGE_NAME=toolset-{version}-py3-none-any.whl/g" {path.join(stack_path, ".env")}')
if download:
do(ctx, f'aws s3 cp --quiet s3://{bucket}/{project}/dist/{project}-{version}-py3-none-any.whl {stack_path}/')
if static:
if download:
do(ctx, f'aws s3 cp --quiet s3://{bucket}/{project}/static/static_v{version}.tar.gz {local_path}')
do(ctx, f'tar -zvxf static_v{version}.tar.gz -C static/', path=local_path)
do(ctx, f'find {local_path}static/ -type d -exec chmod 755 {{}} \\;')
do(ctx, f'find {local_path}static/ -type f -exec chmod 644 {{}} \\;')
if build:
del os.environ['VERSION']
compose(ctx, cmd='build django')
compose(ctx, cmd='up -d django')
# compose(ctx, cmd='up -d celery_worker celery_camera')
if migrate:
db(ctx, 'backup', sync=True)
compose(ctx, cmd=f'exec django {project} migrate')
@task
def docker_ps(ctx):
"""
:return: List of running container names
"""
result = docker(ctx, cmd='ps -a --format "table {{.Names}}"', hide=True)
containers = result.stdout.split('\n')[1:-1]
print(containers)
return containers
# TODO: local_build for building docker image locally
@task
def release_superset(ctx, version):
# TODO: make variable
project = ctx['project_name']
project_root = f'../{project}'
bucket_name = ctx['S3_BUCKET_NAME']
do(ctx, 'rm -rf superset/assets/dist/*')
do(ctx, 'yarn run build', path='superset/assets/')
do(ctx, 'rm -rf build/*')
do(ctx, 'python setup.py bdist_wheel')
do(ctx, f'aws s3 cp --quiet dist/superset-{version}-py3-none-any.whl s3://{bucket_name}/superset/dist/')
do(ctx, f'cp ./dist/superset-{version}-py3-none-any.whl {project_root}/tests/stack/superset/')
# TODO: wrap set_key in function
if not env.dry_run:
# dotenv -f .env -q auto set VERSION version
set_key(
dotenv_path=f'{project_root}/.env', key_to_set='SUPERSET_VERSION', value_to_set=version, quote_mode='auto')
else:
print(REMOTE_PREFIX if ctx.get('host', False) else LOCAL_PREFIX,
f'dotenv -f {project_root}/.env -q auto set SUPERSET_VERSION {version}')
compose(ctx, 'build superset', path=f'{project_root}')
def now_tag(tag=None):
time_str = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-') + 'Z'
return f'{time_str}_{tag}' if tag else time_str
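# Illustration (with a made-up clock): now_tag('manual') yields something like
# '2019-01-31T12-00-00Z_manual' -- an ISO-8601 UTC timestamp with ':' replaced by
# '-' so the tag is safe in file names and S3 keys; without a tag only the
# timestamp is returned.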
def db_backup_old(ctx, tag=None, sync=True, notify=False, replica=True, project=None, image='postgres:9.5',
service_main='postgres', volume_main='postgres',
service_standby='postgres-replica', volume_standby='dbdata', data_dir=None):
if data_dir is None:
data_dir = os.path.abspath(
os.path.join(os.path.dirname(os.getenv('COMPOSE_FILE')), os.getenv('LOCAL_DIR')))
backup_path = os.path.join(ctx['dir'], f'{data_dir}/backups')
tag = now_tag(tag)
backup_cmd = f'tar -zcpf /backup/db_backup.{tag}.tar.gz /data'
# Stop container and make backup of ${PGDATA}
psql(ctx, sql=f"INSERT INTO backup_log (tag) VALUES ('{tag}');")
service = service_standby if replica else service_main
volume = volume_standby if replica else volume_main
compose(ctx, f'stop {service}')
docker(ctx, f'run --rm -v {project}_{volume}:/data -v {backup_path}:/backup {image} {backup_cmd}')
compose(ctx, f'start {service}')
if sync:
s3cmd(ctx, local_path=os.path.join(backup_path, f'db_backup.{tag}.tar.gz'),
s3_path=f'{ctx.s3_project_prefix}/backups/')
if replica:
result = psql(ctx, sql=f"SELECT * from backup_log WHERE tag='{tag}'", service=service)
if tag in getattr(result, 'stdout', ''):
print('Success!')
if notify:
message = f'Backup with tag={tag} uploaded to S3. Please verify.'
send_alert(ctx, message)
# if verify:
# backup=toolset/backups/db_backup....tar.gz
# md5check=$(aws s3api copy-object
# --copy-source dstack-storage/$backup
# --bucket dstack-storage
# --key $backup
# --metadata checked=True
# --metadata-directive REPLACE | jq .CopyObjectResult.ETag | tr -cd '[[:alnum:]]')
# md5sum .local/db_backup...tar.gz
def db_backup(ctx, tag=None, sync=True, project=None, data_dir=None, service='postgres'):
tag = now_tag(tag)
psql(ctx, sql=f"INSERT INTO backup_log (tag) VALUES ('{tag}');")
project = project or ctx['project_name']
host = getattr(ctx, 'host', False)
if host:
os_path = posixpath
else:
os_path = os.path
if data_dir is None:
data_dir = os_path.abspath(
os_path.join(os_path.dirname(os.getenv('COMPOSE_FILE')), os.getenv('LOCAL_DIR')))
backup_file = os_path.join(f'{data_dir}', 'backups', 'backup_latest.pg_dump')
docker(ctx, f'exec {project}_{service}_1 pg_dump -U postgres -F c -d postgres > {backup_file}')
if sync:
endpoint_url = f'--endpoint-url {os.getenv("ENDPOINT_URL")}'
do(ctx, f'aws {endpoint_url} s3 cp {backup_file} s3://dstack-storage/{project}/backups/backup_{tag}.pg_dump')
else:
# TODO: implement local backup copy
pass
@task
def db(ctx, cmd, tag=None, sync=True, notify=False, replica=True, project=None, image='postgres:9.5',
service_main='postgres', volume_main='postgres',
service_standby='postgres-replica', volume_standby='dbdata', data_dir=None):
"""
Args:
ctx:
cmd:
tag:
sync: Default=True. Whether to upload/download to/from s3 or not
        notify: Default=False. Whether to post machine_status
replica: Whether to use simple backup/restore or backup/restore with replica
project:
image:
service_main:
volume_main:
service_standby:
volume_standby:
data_dir: The storage location for backups, static, media files.
Returns:
"""
if project is None:
project = ctx['project_name']
if data_dir is None:
data_dir = os.path.abspath(
os.path.join(os.path.dirname(os.getenv('COMPOSE_FILE')), os.getenv('LOCAL_DIR')))
backup_path = os.path.join(ctx['dir'], f'{data_dir}/backups')
# promote_cmd = 'su - postgres -c "/usr/lib/postgresql/9.5/bin/pg_ctl promote -D /var/lib/postgresql/data"'
if cmd == 'backup':
db_backup(ctx, tag=tag, sync=sync, project=project, data_dir=data_dir, service=service_main)
elif cmd == 'restore':
if sync:
s3cmd(ctx, direction='down',
s3_path=f'{ctx.s3_project_prefix}/backups/db_backup.{tag}.tar.gz',
local_path=f'{backup_path}/')
restore_cmd = f'bash -c "tar xpf /backup/db_backup.{tag}.tar.gz && chmod -R 700 /data"'
# TODO: First restart django with updated POSTGRES_HOST=standby and then only destroy afterwards
if replica:
# Destroy replica server and associated volume
compose(ctx, f'rm -vsf {service_standby}')
docker(ctx, f'volume rm {project}_{volume_standby}', warn=True)
# Restore database
compose(ctx, f'-p {project} stop {service_main}')
docker(ctx, f'run --rm -v {project}_{volume_main}:/data -v {backup_path}:/backup {image} {restore_cmd}')
compose(ctx, f'-p {project} start {service_main}')
# compose(ctx, f'exec -T {service_main} {promote_cmd}')
compose(ctx, f'-p {project} exec -T {service_main} touch /tmp/pg_failover_trigger')
if replica:
# Recreate standby database
compose(ctx, f'up -d {service_standby}')
elif cmd == 'recreate-standby':
compose(ctx, f'rm -vsf {service_standby}')
docker(ctx, f'volume rm {project}_{volume_standby}')
compose(ctx, f'up -d {service_standby}')
elif cmd == 'check':
result_main = psql(ctx, 'SELECT * from backup_log;')
result_standby = psql(ctx, 'SELECT * from backup_log;', service='postgres-replica')
if ('initialized' in getattr(result_main, 'stdout', '') and
'initialized' in getattr(result_standby, 'stdout', '')):
print('Success!')
elif cmd == 'enable-replication':
# TODO: Test this code and maybe make part of main restore task
compose(ctx, f'exec {service_main} ./docker-entrypoint-initdb.d/10-config.sh')
compose(ctx, f'exec {service_main} ./docker-entrypoint-initdb.d/20-replication.sh')
compose(ctx, f'restart {service_main}')
compose(ctx, f'up -d {service_standby}')
@task
def psql(ctx, sql, service='postgres', user='postgres'):
return compose(ctx, f'exec -T {service} psql -U {user} -c "{sql}"')
@task
def full_db_test(ctx):
    db(ctx, cmd='backup', sync=False)
psql(ctx, sql=f"INSERT INTO backup_log (tag) VALUES ('not_backed_up'); SELECT * from backup_log;")
tag = input("Tag:")
db(ctx, cmd='restore', tag=tag)
time.sleep(10)
db(ctx, cmd='check')
@task
def create_backup_table(ctx):
sql = """CREATE TABLE IF NOT EXISTS backup_log (
id serial not null primary key,
date_created timestamp default current_timestamp,
tag VARCHAR(255))"""
psql(ctx, sql=" ".join(sql.split()))
psql(ctx, sql="INSERT INTO backup_log (tag) VALUES ('initialized');")
@task
def install_superset(ctx):
# CREATE DATABASE superset;
# CREATE USER superset WITH PASSWORD 'superset';
# GRANT ALL PRIVILEGES ON DATABASE superset TO superset;
# TODO: Make database name and password env variables
psql(ctx, 'CREATE DATABASE superset')
psql(ctx, "CREATE USER superset WITH PASSWORD 'superset'")
psql(ctx, 'GRANT ALL PRIVILEGES ON DATABASE superset TO superset')
compose(ctx, 'exec superset fabmanager create-admin --app superset')
compose(ctx, 'exec superset superset db upgrade')
compose(ctx, 'exec superset superset init')
# @task
# def secure_copy():
# aws s3 cp --sse=AES256 s3://dstack-storage/tfc ./
# gpg -d tfc | tar -zxf -
# rm -rf tfc
|
|
import logging
import xml.etree.ElementTree as ET
import re
import gzip
import io
import shutil
import csv
import urllib.parse
from dipper.sources.OMIMSource import OMIMSource
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.assoc.D2PAssoc import D2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.sources.NCBIGene import NCBIGene
from dipper.utils.DipperUtil import DipperUtil
from dipper.models.Model import Model
from dipper.models.BiolinkVocabulary import BioLinkVocabulary as blv
LOG = logging.getLogger(__name__)
class OMIA(OMIMSource):
"""
This is the parser for the
[Online Mendelian Inheritance in Animals
    (OMIA)](https://omia.org),
from which we process inherited disorders, other (single-locus) traits,
    and genes in >200 animal species (other than human, mouse, and rat).
We generate the omia graph to include the following information:
* genes
* animal taxonomy, and breeds as instances of those taxa
(breeds are akin to "strains" in other taxa)
* animal diseases, along with species-specific subtypes of those diseases
* publications (and their mapping to PMIDs, if available)
    * gene-to-phenotype associations (via an anonymous variant-locus)
* breed-to-phenotype associations
We make links between OMIA and OMIM in two ways:
1. mappings between OMIA and OMIM are created as OMIA --> hasdbXref OMIM
2. mappings between a breed and OMIA disease are created
to be a 'is model of' the mapped OMIM disease,
IF AND ONLY IF it is a 1:1 mapping.
there are some 1:many mappings,
and these often happen if the OMIM item is a gene.
Because many of these species are not covered in
the PANTHER orthology datafiles, we also pull any orthology
relationships from the gene_group files from NCBI.
"""
files = {
'data': {
'file': 'omia.xml.gz',
# CNAME broken? urllib not following redirects??
# 'url': 'http://omia.angis.org.au/dumps/omia.xml.gz'
# 'url': 'http://compldb.angis.org.au/dumps/omia.xml.gz',
'url': 'https://omia.org/dumps/omia.xml.gz'
# see dipper/resources/omia/omia_xml.* for xml xpaths and more
},
'causal_mutations': { # not used yet
'file': 'causal_mutations.tab',
'columns': [ # expected
'gene_symbol',
'ncbi_gene_id',
'OMIA_id',
'ncbi_tax_id',
'OMIA_url',
'phene_name'],
'url': 'http://omia.org/curate/causal_mutations/?format=gene_table',
},
}
def __init__(
self, graph_type, are_bnodes_skolemized, data_release_version=None):
super().__init__(
graph_type=graph_type,
are_bnodes_skolemized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='omia',
ingest_title='Online Mendelian Inheritance in Animals',
ingest_url='https://omia.org',
ingest_logo='source-omia.png',
# ingest_desc=None,
license_url=None,
data_rights='http://sydney.edu.au/disclaimer.shtml',
# file_handle=None
)
self.id_hash = {
'article': {},
'phene': {},
'breed': {},
'taxon': {},
'gene': {}
}
self.label_hash = {}
# used to store the omia to omim phene mappings
self.omia_omim_map = {}
# used to store the unique genes that have phenes
# (for fetching orthology)
self.annotated_genes = set()
self.test_ids = {
'disease': [
'OMIA:001702', 'OMIA:001867', 'OMIA:000478', 'OMIA:000201',
'OMIA:000810', 'OMIA:001400'],
'gene': [
'492297', '434', '492296', '3430235', '200685834', '394659996',
'200685845', '28713538', '291822383'],
'taxon': [
'9691', '9685', '9606', '9615', '9913', '93934', '37029', '9627',
'9825'],
# to be filled in during parsing of breed table
# for lookup by breed-associations
'breed': []
}
# to store a map of omia ids and any molecular info
# to write a report for curation
self.stored_omia_mol_gen = {}
self.graph = self.graph
self.ncbi = NCBIGene(self.graph_type, self.are_bnodes_skized)
def fetch(self, is_dl_forced=False):
"""
:param is_dl_forced:
:return:
"""
self.get_files(is_dl_forced)
gene_group = self.ncbi.files['gene_group']
self.fetch_from_url(
gene_group['url'], '/'.join((self.ncbi.rawdir, gene_group['file'])), False)
def parse(self, limit=None):
# names of tables to iterate over - probably don't need all these:
# Article_Breed, Article_Keyword, Article_Gene, Article_Keyword,
# Article_People, Article_Phene, Articles, Breed, Breed_Phene,
# Genes_gb, Group_Categories, Group_MPO, Inherit_Type, Keywords,
# Landmark, Lida_Links, OMIA_Group, OMIA_author, Omim_Xref, People,
# Phene, Phene_Gene, Publishers, Resources, Species_gb, Synonyms
self.scrub()
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
if self.test_mode:
self.graph = self.testgraph
else:
self.graph = self.graph
# we do three passes through the file
# first process species (two others reference this one)
self.process_species(limit)
# then, process the breeds, genes, articles, and other static stuff
self.process_classes(limit)
# next process the association data
self.process_associations(limit)
# process the vertebrate orthology for genes
# that are annotated with phenotypes
self.ncbi.add_orthologs_by_gene_group(self.graph, self.annotated_genes)
LOG.info("Done parsing.")
self.write_molgen_report()
def scrub(self):
"""
The XML file seems to have mixed-encoding;
we scrub out the control characters
from the file for processing.
        e.g.:
omia.xml:1555328.28: PCDATA invalid Char value 2
<field name="journal">Bulletin et Memoires de la Societe Centrale de Medic
:return:
"""
LOG.info("Scrubbing out the nasty characters that break our parser.")
myfile = '/'.join((self.rawdir, self.files['data']['file']))
tmpfile = '/'.join((self.rawdir, self.files['data']['file'] + '.tmp.gz'))
tmp = gzip.open(tmpfile, 'wb')
du = DipperUtil()
with gzip.open(myfile, 'rb') as readbin:
filereader = io.TextIOWrapper(readbin, newline="")
for line in filereader:
line = du.remove_control_characters(line) + '\n'
tmp.write(line.encode('utf-8'))
tmp.close()
# TEC I do not like this at all. original data must be preserved as is.
# also may be heavy handed as chars which do not break the parser
# are stripped as well (i.e. tabs and newlines)
# move the temp file
LOG.info("Replacing the original data with the scrubbed file.")
shutil.move(tmpfile, myfile)
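        # e.g. the stray control byte behind the "PCDATA invalid Char value 2"
        # error quoted above is stripped by remove_control_characters(), so
        # ET.iterparse can read the re-written file.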
# ###################### XML LOOPING FUNCTIONS ##################
def process_species(self, limit):
"""
Loop through the xml file and process the species.
We add elements to the graph, and store the
id-to-label in the label_hash dict.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
with gzip.open(myfile, 'rb') as readbin:
filereader = io.TextIOWrapper(readbin, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader):
# Species ids are == NCBITaxon ids
self.process_xml_table(
elem, 'Species_gb', self._process_species_table_row, limit)
def process_classes(self, limit):
"""
        After all species have been processed,
        loop through the xml file and process the articles,
        breeds, genes, phenes, and phenotype-grouping classes.
We add elements to the graph,
and store the id-to-label in the label_hash dict,
along with the internal key-to-external id in the id_hash dict.
The latter are referenced in the association processing functions.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
with gzip.open(myfile, 'rb') as readbin:
filereader = io.TextIOWrapper(readbin, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader):
self.process_xml_table(
elem, 'Articles', self._process_article_row, limit)
self.process_xml_table(elem, 'Breed', self._process_breed_row, limit)
self.process_xml_table(elem, 'Genes_gb', self._process_gene_row, limit)
self.process_xml_table(
elem, 'OMIA_Group', self._process_omia_group_row, limit)
self.process_xml_table(elem, 'Phene', self._process_phene_row, limit)
self.process_xml_table(
elem, 'Omim_Xref', self._process_omia_omim_map, limit)
# post-process the omia-omim associations to filter out the genes
# (keep only phenotypes/diseases)
self.clean_up_omim_genes()
def process_associations(self, limit):
"""
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
with gzip.open(myfile, 'rb') as readbin:
filereader = io.TextIOWrapper(readbin, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
# ############ INDIVIDUAL TABLE-LEVEL PROCESSING FUNCTIONS ################
def _process_species_table_row(self, row): # row is expected as a dict
# gb_species_id, sci_name, com_name, added_by, date_modified
tax_id = 'NCBITaxon:' + str(row['gb_species_id'])
sci_name = row['sci_name']
com_name = row['com_name']
model = Model(self.graph)
if self.test_mode and row['gb_species_id'] not in self.test_ids['taxon']:
return
model.addClassToGraph(tax_id)
if com_name != '':
model.addSynonym(tax_id, com_name)
self.label_hash[tax_id] = com_name # for lookup later
else:
self.label_hash[tax_id] = sci_name
def _process_breed_row(self, row):
model = Model(self.graph)
# in test mode, keep all breeds of our test species
if self.test_mode and row['gb_species_id'] not in self.test_ids['taxon']:
return
# save the breed keys in the test_ids for later processing
self.test_ids['breed'] += [row['breed_id']]
breed_id = 'OMIA-breed:' + str(row['breed_id'])
self.id_hash['breed'][row['breed_id']] = breed_id
tax_id = 'NCBITaxon:' + str(row['gb_species_id'])
breed_label = row['breed_name']
species_label = self.label_hash.get(tax_id)
if species_label is not None:
breed_label = breed_label + ' (' + species_label + ')'
model.addIndividualToGraph(
breed_id,
breed_label,
tax_id,
ind_category=blv.terms['PopulationOfIndividualOrganisms']
)
self.label_hash[breed_id] = breed_label
def _process_phene_row(self, row):
model = Model(self.graph)
phenotype_id = None
sp_phene_label = row['phene_name']
if sp_phene_label == '':
sp_phene_label = None
if 'omia_id' not in row:
LOG.info("omia_id not present for %s", row['phene_id'])
omia_id = self._make_internal_id('phene', phenotype_id)
else:
omia_id = 'OMIA:' + str(row['omia_id'])
if self.test_mode and not( # demorgan this
row['gb_species_id'] in self.test_ids['taxon'] and omia_id
in self.test_ids['disease']):
return
# add to internal hash store for later lookup
self.id_hash['phene'][row['phene_id']] = omia_id
descr = row['summary']
if descr == '':
descr = None
# omia label
omia_label = self.label_hash.get(omia_id)
# add the species-specific subclass (TODO please review this choice)
gb_species_id = row['gb_species_id']
if gb_species_id != '':
sp_phene_id = '-'.join((omia_id, gb_species_id))
else:
LOG.error(
"No species supplied in species-specific phene table for %s", omia_id)
return
species_id = 'NCBITaxon:' + str(gb_species_id)
# use this instead
species_label = self.label_hash.get('NCBITaxon:' + gb_species_id)
if sp_phene_label is None and omia_label is not None \
and species_label is not None:
sp_phene_label = ' '.join((omia_label, 'in', species_label))
model.addClassToGraph(
sp_phene_id,
sp_phene_label,
omia_id,
descr,
class_category=blv.terms['PhenotypicFeature']
)
# add to internal hash store for later lookup
self.id_hash['phene'][row['phene_id']] = sp_phene_id
self.label_hash[sp_phene_id] = sp_phene_label
# add each of the following descriptions,
# if they are populated, with a tag at the end.
for item in ['clin_feat', 'history', 'pathology', 'mol_gen', 'control']:
if row[item] is not None and row[item] != '':
model.addDescription(
sp_phene_id,
row[item] + ' [' + item + ']',
subject_category=blv.terms['PhenotypicFeature']
)
# if row['symbol'] is not None: # species-specific
# CHECK ME - sometimes spaces or gene labels
# gu.addSynonym(g, sp_phene, row['symbol'])
model.addOWLPropertyClassRestriction(
sp_phene_id,
self.globaltt['in taxon'],
species_id,
class_category=blv.terms['PhenotypicFeature']
)
# add inheritance as an association
inheritance_id = None
if row['inherit'] is not None and row['inherit'] in self.localtt:
inheritance_id = self.resolve(row['inherit'])
elif row['inherit'] is not None and row['inherit'] != '':
LOG.info('Unhandled inheritance type:\t%s', row['inherit'])
if inheritance_id is not None: # observable related to genetic disposition
assoc = D2PAssoc( # JR: not sure we should be using D2PAssoc for this
self.graph, self.name, sp_phene_id, inheritance_id,
rel=self.globaltt['has disposition'],
disease_category=blv.terms['PhenotypicFeature']
)
assoc.add_association_to_graph()
if row['characterised'] == 'Yes':
self.stored_omia_mol_gen[omia_id] = {
'mol_gen': row['mol_gen'],
'map_info': row['map_info'],
'species': row['gb_species_id']}
def write_molgen_report(self):
LOG.info("Writing G2P report for OMIA")
filename = '/'.join((self.outdir, 'omia_molgen_report.txt'))
with open(filename, 'w', newline='\n') as csvfile:
writer = csv.writer(csvfile, delimiter='\t')
writer.writerow( # write header
['omia_id', 'molecular_description', 'mapping_info', 'species'])
for phene in self.stored_omia_mol_gen:
writer.writerow((
str(phene),
self.stored_omia_mol_gen[phene]['mol_gen'],
self.stored_omia_mol_gen[phene]['map_info'],
self.stored_omia_mol_gen[phene]['species']))
LOG.info(
"Wrote %d potential G2P descriptions for curation to %s",
len(self.stored_omia_mol_gen), filename)
def _process_article_row(self, row):
model = Model(self.graph)
# don't bother in test mode
if self.test_mode:
return
iarticle_id = self._make_internal_id('article', row['article_id'])
self.id_hash['article'][row['article_id']] = iarticle_id
rtype = None
if row['journal'] != '':
rtype = self.globaltt['journal article']
reference = Reference(self.graph, iarticle_id, rtype)
if row['title'] is not None:
reference.setTitle(row['title'].strip())
if row['year'] is not None:
reference.setYear(row['year'])
reference.addRefToGraph()
if row['pubmed_id'] is not None:
pmid = 'PMID:' + str(row['pubmed_id'])
self.id_hash['article'][row['article_id']] = pmid
model.addSameIndividual(iarticle_id, pmid)
model.addComment(pmid, iarticle_id.replace("_:", ''))
def _process_omia_group_row(self, row):
model = Model(self.graph)
omia_id = 'OMIA:' + row['omia_id']
if self.test_mode and omia_id not in self.test_ids['disease']:
return
group_name = row['group_name']
group_summary = row['group_summary']
# default to general disease seems the only reasonable choice
disease_id = self.globaltt['disease or disorder']
group_category = 'group_category:' + str(row['group_category'])
disease_id = self.resolve(group_category, False)
if disease_id == 'group_category:None':
disease_id = self.globaltt['disease or disorder']
elif disease_id == group_category:
LOG.info(
"No disease superclass defined for %s: %s with parent %s",
omia_id, group_name, group_category)
disease_id = self.globaltt['disease or disorder']
else:
if disease_id == self.globaltt['embryonic lethality']:
# add this as a phenotype association
# add embryonic onset
assoc = D2PAssoc(self.graph, self.name, omia_id, disease_id)
assoc.add_association_to_graph()
# disease_id = None
model.addClassToGraph(disease_id, None,
class_category=blv.terms['Disease'])
if group_summary == '':
group_summary = None
if group_name == '':
group_name = None
model.addClassToGraph(
omia_id, group_name, description=group_summary, class_type=disease_id)
self.label_hash[omia_id] = group_name
def _process_gene_row(self, row):
model = Model(self.graph)
geno = Genotype(self.graph)
if self.test_mode and row['gene_id'] not in self.test_ids['gene']:
return
gene_id = 'NCBIGene:' + str(row['gene_id'])
self.id_hash['gene'][row['gene_id']] = gene_id
gene_label = row['symbol']
self.label_hash[gene_id] = gene_label
tax_id = 'NCBITaxon:' + str(row['gb_species_id'])
if row['gene_type'] is not None:
gene_type_id = self.resolve(row['gene_type'])
model.addClassToGraph(gene_id, gene_label, gene_type_id)
geno.addTaxon(tax_id, gene_id)
def _process_article_breed_row(self, row):
# article_id, breed_id, added_by
# don't bother putting these into the test... too many!
# and row['breed_id'] not in self.test_ids['breed']:
if self.test_mode:
return
article_id = self.id_hash['article'].get(row['article_id'])
breed_id = self.id_hash['breed'].get(row['breed_id'])
# there's some missing data (article=6038). in that case skip
if article_id is not None:
self.graph.addTriple(article_id, self.globaltt['is_about'], breed_id)
else:
LOG.warning("Missing article key %s", str(row['article_id']))
def _process_article_phene_row(self, row):
"""
Linking articles to species-specific phenes.
:param row:
:return:
"""
# article_id, phene_id, added_by
# look up the article in the hashmap
phenotype_id = self.id_hash['phene'].get(row['phene_id'])
article_id = self.id_hash['article'].get(row['article_id'])
omia_id = self._get_omia_id_from_phene_id(phenotype_id)
if self.test_mode or omia_id not in self.test_ids['disease'] \
or phenotype_id is None or article_id is None:
return
# make a triple, where the article is about the phenotype
self.graph.addTriple(article_id, self.globaltt['is_about'], phenotype_id)
def _process_breed_phene_row(self, row):
model = Model(self.graph)
# Linking disorders/characteristic to breeds
# breed_id, phene_id, added_by
breed_id = self.id_hash['breed'].get(row['breed_id'])
phene_id = self.id_hash['phene'].get(row['phene_id'])
# get the omia id
omia_id = self._get_omia_id_from_phene_id(phene_id)
if breed_id is None or phene_id is None or (
self.test_mode and (
omia_id not in self.test_ids['disease'] or
row['breed_id'] not in self.test_ids['breed'])):
return
# FIXME we want a different relationship here
# JR: probably shouldn't use G2PAssoc here
assoc = G2PAssoc(
self.graph, self.name, breed_id, phene_id, self.globaltt['has phenotype'])
assoc.add_association_to_graph()
# add that the breed is a model of the human disease
# use the omia-omim mappings for this
# we assume that we have already scrubbed out the genes
# from the omim list, so we can make the model associations here
omim_ids = self.omia_omim_map.get(omia_id)
eco_id = self.globaltt['biological aspect of descendant evidence']
if omim_ids is not None and omim_ids:
# if len(omim_ids) > 1:
# LOG.info(
# "There's 1:many omia:omim mapping: %s, %s", omia_id, str(omim_ids))
# else:
# oid = list(omim_ids)[0]
# LOG.info("OMIA %s is mapped to OMIM %s", omia_id, oid)
for oid in omim_ids:
assoc = G2PAssoc(
self.graph, self.name, breed_id, oid, self.globaltt['is model of']
)
assoc.add_evidence(eco_id)
assoc.add_association_to_graph()
aid = assoc.get_association_id()
breed_label = self.label_hash.get(breed_id)
if breed_label is None: # get taxon label?
breed_label = "this breed"
mch = re.search(r'\((.*)\)', breed_label)
if mch:
sp_label = mch.group(1)
else:
sp_label = ''
phene_label = self.label_hash.get(phene_id)
if phene_label is None:
phene_label = "phenotype"
elif phene_label.endswith(sp_label):
# some of the labels we made already include the species;
# remove it to make a cleaner desc
phene_label = re.sub(r' in ' + sp_label, '', phene_label)
desc = ' '.join(
("High incidence of", phene_label, "in", breed_label,
"suggests it to be a model of disease", oid + "."))
model.addDescription(aid, desc)
else:
LOG.warning("No OMIM Disease associated with %s", omia_id)
def _process_lida_links_row(self, row):
model = Model(self.graph)
# lidaurl, omia_id, added_by
omia_id = 'OMIA:' + row['omia_id']
lidaurl = row['lidaurl']
if self.test_mode and omia_id not in self.test_ids['disease']:
return
        # LIDA is hard to find/resolve (404s; suspect offline)
        # consider changing to model.addSynonym(omia_id, lidaurl)
        # b/c URIs are not literals
model.addXref(omia_id, urllib.parse.quote(lidaurl))
def _process_phene_gene_row(self, row):
geno = Genotype(self.graph)
model = Model(self.graph)
gene_id = self.id_hash['gene'].get(row['gene_id'])
phene_id = self.id_hash['phene'].get(row['phene_id'])
omia_id = self._get_omia_id_from_phene_id(phene_id)
if self.test_mode and not (
omia_id in self.test_ids['disease'] and
row['gene_id'] in self.test_ids['gene']
) or gene_id is None or phene_id is None:
return
# occasionally some phenes are missing! (ex: 406)
if phene_id is None:
LOG.warning("Phene id %s is missing", str(row['phene_id']))
return
gene_label = self.label_hash[gene_id]
        # some variant of gene_id has this phenotype
var = self.make_id(gene_id.split(':')[-1] + 'VL', '_')
geno.addAllele(var, 'some variant of ' + gene_label)
geno.addAlleleOfGene(var, gene_id)
geno.addAffectedLocus(var, gene_id)
model.addBlankNodeAnnotation(var)
assoc = G2PAssoc(self.graph, self.name, var, phene_id)
assoc.add_association_to_graph()
# add the gene id to the set of annotated genes
# for later lookup by orthology
self.annotated_genes.add(gene_id)
def _process_omia_omim_map(self, row):
"""
Links OMIA groups to OMIM equivalents.
:param row:
:return:
"""
# omia_id, omim_id, added_by
model = Model(self.graph)
omia_id = 'OMIA:' + row['omia_id']
omim_id = 'OMIM:' + row['omim_id']
# also store this for use when we say that a given animal is
# a model of a disease
if omia_id not in self.omia_omim_map:
self.omia_omim_map[omia_id] = set()
self.omia_omim_map[omia_id].add(omim_id)
if self.test_mode and omia_id not in self.test_ids['disease']:
return
model.addXref(omia_id, omim_id,
class_category=blv.terms['Disease'],
xref_category=blv.terms['Disease'])
def _process_group_mpo_row(self, row):
"""
Make OMIA to MP associations
:param row:
:return:
"""
omia_id = 'OMIA:' + row['omia_id']
mpo_num = row['MPO_no']
mpo_id = 'MP:' + str(mpo_num).zfill(7)
assoc = D2PAssoc(self.graph, self.name, omia_id, mpo_id)
assoc.add_association_to_graph()
def clean_up_omim_genes(self):
'''
Attempt to limit omim links to diseases and not genes/locus
'''
# get all the omim ids
allomim_curie = set()
for omia in self.omia_omim_map:
allomim_curie.update(self.omia_omim_map[omia])
# strip the curie prefix
allomimids = set([o.split(':')[-1] for o in allomim_curie])
LOG.info("Have %i omim_ids before filtering", len(allomimids))
LOG.info("Exists %i omim_ids replaceable", len(self.omim_replaced))
if self.omim_replaced:
LOG.info(
"Sample of each (all & replace) look like: %s , %s",
list(allomimids)[0],
list(self.omim_replaced.keys())[0])
# deal with replaced identifiers
replaced = allomimids & self.omim_replaced.keys()
if replaced is not None and replaced:
LOG.warning("These OMIM ID's are past their pull date: %s", str(replaced))
for oid in replaced:
allomimids.remove(oid)
replacements = self.omim_replaced[oid]
for rep in replacements:
                    allomimids.add(rep)  # add the replacement id itself, not its characters
# guard against omim identifiers which have been removed
obsolete = [
o for o in self.omim_type
if self.omim_type[o] == self.globaltt['obsolete']]
removed = allomimids & set(obsolete)
if removed is not None and removed:
LOG.warning("These OMIM ID's are gone: %s", str(removed))
for oid in removed:
allomimids.remove(oid)
# get a list of omim ids which we consider to be for disease / phenotype
omim_phenotypes = set([
omim for omim in self.omim_type if self.omim_type[omim] in (
self.globaltt['phenotype'],
self.globaltt['has_affected_feature'],
self.globaltt['heritable_phenotypic_marker'])])
LOG.info(
"Have %i omim_ids globally typed as phenotypes from OMIM",
len(omim_phenotypes))
entries_that_are_phenotypes = allomimids & omim_phenotypes
LOG.info(
"Filtered out %d/%d entries that are genes or features",
len(allomimids - entries_that_are_phenotypes), len(allomimids))
# now iterate again and remove those non-phenotype ids
# this could be redone with set operations
removed_count = 0
for omia in self.omia_omim_map:
cleanids = set()
for dirty_curie in self.omia_omim_map[omia]:
dirty_num = dirty_curie.split(':')[-1]
if dirty_num in entries_that_are_phenotypes:
cleanids.add(dirty_curie)
else:
removed_count += 1 # keep track of how many we've removed
self.omia_omim_map[omia] = cleanids
LOG.info("Removed %d omim ids from the omia-to-omim map", removed_count)
@staticmethod
def _make_internal_id(prefix, key):
''' more blank nodes '''
return '_:' + ''.join(('omia', prefix, 'key', str(key)))
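    # e.g. (illustrative): _make_internal_id('breed', 123) -> '_:omiabreedkey123'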
@staticmethod
def _get_omia_id_from_phene_id(phene_id):
omia_id = None
if phene_id is not None:
mch = re.match(r'OMIA:\d+', str(phene_id))
if mch:
omia_id = mch.group(0)
return omia_id
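    # Illustrative only: assuming a species-specific phene id such as
    # 'OMIA:000214-9913', the regex above extracts the group id 'OMIA:000214'.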
def getTestSuite(self):
import unittest
from tests.test_omia import OMIATestCase
test_suite = unittest.TestLoader().loadTestsFromTestCase(OMIATestCase)
return test_suite
|
|
# This file is part of curious.
#
# curious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# curious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with curious. If not, see <http://www.gnu.org/licenses/>.
"""
Wrappers for Webhook objects.
.. currentmodule:: curious.dataclasses.webhook
"""
import typing
from curious.dataclasses import channel as dt_channel, embed as dt_embed, guild as dt_guild, \
user as dt_user
from curious.dataclasses.bases import Dataclass
from curious.util import base64ify
class Webhook(Dataclass):
"""
Represents a webhook on the guild.
Messages in a guild can be sent by either a Member or a Webhook object - curious makes a key
distinction between them. These classes are *mostly* compatible and don't require much
effort to use them generically.
.. code-block:: python3
@event("message_create")
async def handle_messages(ctx, message: Message):
author = message.author # can be Webhook or Member
"""
__slots__ = "user", "guild_id", "channel_id", "token", "owner", \
"default_name", "_default_avatar"
def __init__(self, client, **kwargs) -> None:
        # Use the webhook ID if provided (i.e. created from a message object).
        # If that doesn't exist, we use the ID of the data instead (it's probably right!).
super().__init__(kwargs.get("webhook_id", kwargs.get("id")), cl=client)
#: The user object associated with this webhook.
self.user = None # type: dt_user.User
#: The ID of the Guild associated with this object.
self.guild_id = None # type: int
#: The ID of the Channel associated with this object.
self.channel_id = None # type: int
#: The token associated with this webhook.
#: This is None if the webhook was received from a Message object.
self.token = kwargs.get("token", None) # type: str
#: The owner of this webhook.
self.owner = None # type: dt_user.User
#: The default name of this webhook.
self.default_name = None # type: str
#: The default avatar of this webhook.
self._default_avatar = None # type: str
def __repr__(self) -> str:
return "<Webhook id={} name={} channel={} owner={}>".format(self.id, self.name,
repr(self.channel),
repr(self.owner))
__str__ = __repr__
@property
def default_avatar_url(self) -> str:
"""
:return: The default avatar URL for this webhook.
"""
return "https://cdn.discordapp.com/avatars/{}/{}.png".format(self.id, self._default_avatar)
@property
def avatar_url(self) -> str:
"""
:return: The computed avatar URL for this webhook.
"""
if self.user.avatar_hash is None:
return self.default_avatar_url
return str(self.user.avatar_url)
@property
def name(self) -> str:
"""
:return: The computed name for this webhook.
"""
# this is kept so you can easily do `message.author.name` all the time.
return self.user.name or self.default_name
@property
def guild(self) -> 'dt_guild.Guild':
"""
:return: The :class:`.Guild` this webhook is in.
"""
return self._bot.guilds.get(self.guild_id)
@property
def channel(self) -> 'dt_channel.Channel':
"""
:return: The :class:`.Channel` this webhook is in.
"""
if self.guild is None:
return None
return self.guild.channels.get(self.channel_id)
@classmethod
async def create(cls, channel: 'dt_channel.Channel', *,
name: str, avatar: bytes) -> 'Webhook':
"""
Creates a new webhook.
:param channel: The :class:`.Channel` to create the webhook in.
:param name: The name of the webhook to create.
:param avatar: The bytes data for the webhook's default avatar.
:return: A new :class:`.Webhook`.
"""
return await channel.create_webhook(name=name, avatar=avatar)
async def get_token(self) -> str:
"""
Gets the token for this webhook, if no token was set earlier.
:return: The token for the webhook.
"""
if self.token:
return self.token
us = await self._bot.http.get_webhook(self.id)
self.token = us.get("token")
return self.token
async def delete(self) -> None:
"""
Deletes the webhook.
        You must either be the owner of this webhook, or the webhook must have a
        token associated with it, in order for it to be deleted.
"""
if self.token is not None:
return await self._bot.http.delete_webhook_with_token(self.id, self.token)
else:
return await self.guild.delete_webhook(self)
async def edit(self, *,
name: str = None, avatar: bytes = None) -> 'Webhook':
"""
Edits this webhook.
:param name: The new name for this webhook.
:param avatar: The bytes-encoded content of the new avatar.
:return: The webhook object.
"""
if avatar is not None:
avatar = base64ify(avatar)
if self.token is not None:
# edit with token, don't pass to guild
data = await self._bot.http.edit_webhook_with_token(self.id, name=name, avatar=avatar)
self.default_name = data.get("name")
self._default_avatar = data.get("avatar")
# Update the user too
self.user.username = data.get("name")
self.user.avatar_hash = data.get("avatar")
else:
await self.channel.edit_webhook(self, name=name, avatar=avatar)
return self
async def execute(self, *,
content: str = None, username: str = None, avatar_url: str = None,
embeds: 'typing.List[dt_embed.Embed]'=None, wait: bool = False) \
-> typing.Union[None, str]:
"""
Executes the webhook.
:param content: Any raw content to send.
:param username: A username to override the default username of the webhook with.
:param avatar_url: The URL for the avatar to override the default avatar with.
:param embeds: A list of embeds to add to the message.
:param wait: Should we wait for the message to arrive before returning?
"""
if embeds:
embeds = [embed.to_dict() for embed in embeds]
if self.token is None:
await self.get_token()
data = await self._bot.http.execute_webhook(self.id, self.token,
content=content, embeds=embeds,
username=username, avatar_url=avatar_url,
wait=wait)
if wait:
return self._bot.state.make_message(data, cache=False)
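# A minimal usage sketch (not part of the original module): assumes a
# ``channel`` obtained from a connected curious client and a local avatar file.
async def _example_notify(channel: 'dt_channel.Channel') -> None:
    # create a webhook in the channel, then post a message through it
    with open("avatar.png", "rb") as fp:
        avatar_bytes = fp.read()
    webhook = await Webhook.create(channel, name="Notifier", avatar=avatar_bytes)
    await webhook.execute(content="Build finished", username="CI Bot")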
|
|
# -*- coding: utf-8 -*-
from string import ascii_letters, ascii_uppercase
from random import choice
class Word(object):
'''
Word class for managing relationships to other words
'''
def __init__(self, word):
self.word = word
self.befores = dict()
self.afters = dict()
    def add_before(self, other_word):
        # count one occurrence of other_word appearing before this word
        if other_word not in self.befores:
            self.befores[other_word] = 0
        self.befores[other_word] += 1
    def add_after(self, other_word):
        # count one occurrence of other_word appearing after this word
        if other_word not in self.afters:
            self.afters[other_word] = 0
        self.afters[other_word] += 1
@staticmethod
def sort_by_number_of_connections(group):
return sorted(group, key=lambda key: key.number_of_connections)
@property
def is_starting_word(self):
return len(self.befores) == 0
@property
def is_ending_word(self):
return len(self.afters) == 0
@property
def number_of_befores(self):
return sum(self.befores.values())
@property
def number_of_afters(self):
return sum(self.afters.values())
@property
def number_of_connections(self):
return sum(self.afters.values()) + sum(self.befores.values())
@property
def ratio_of_befores(self):
        return (self.number_of_befores + 0.0) / self.number_of_connections
@property
def ratio_of_afters(self):
return (self.number_of_afters + 0.0) / self.number_of_connections
@property
def most_paired_before(self):
befores = sorted_dict(self.befores)
return befores[-1][0]
@property
def most_paired_after(self):
afters = sorted_dict(self.afters)
return afters[-1][0]
@property
def most_connected_before(self):
return Word.sort_by_number_of_connections(self.befores.keys())[-1]
@property
def most_connected_after(self):
return Word.sort_by_number_of_connections(self.afters.keys())[-1]
def __eq__(self, other):
return self.word == other
def __str__(self):
return unicode(self.word)
def __unicode__(self):
return unicode(self.word)
def __hash__(self):
return hash(self.word)
def highest_connected_word(data):
''' returns the word with the most connections'''
return Word.sort_by_number_of_connections(data)[-1]
def sorted_dict(dictionary):
''' returns a list of tuples from the sorted items of a dictionary'''
return sorted([(k, v) for k, v in dictionary.iteritems()], key=lambda (k, v) : (v, k))
def clean_line(line):
''' removes unwanted characters from line'''
return ''.join(letter for letter in line if letter not in '')
def is_end_word(word):
'''returns true if the word ends with a sentence-terminating character'''
return any([word.endswith('.'), word.endswith('!'), word.endswith('?')])
def sentencify(data):
    ''' takes an iterable of lines, then breaks it down into sentences'''
new_data = []
if not isinstance(data[0], unicode):
for line in data:
try:
new_data.append(line.decode('utf-8'))
except UnicodeDecodeError:
hm_line = []
print line
for char in line:
try:
hm_line.append(char.decode('utf-8'))
except UnicodeDecodeError:
print repr(char)
                new_data.append(u''.join(hm_line))
else:
new_data = data[:]
sentences = []
current_sentence = []
for line in new_data:
line = line.strip()
line = clean_line(line)
if line == '':
continue
for word in line.split():
current_sentence.append(word)
if is_end_word(word):
sentences.append(u' '.join(unicode(word) for word in current_sentence))
current_sentence = []
return sentences
def split_into_groups(sentence):
'''splits a sentence into groups of (before, current, after) if possible'''
groups = []
sentence = sentence.split()
for x, word in enumerate(sentence):
if x == 0:
try:
word_dict = {'after' : sentence[x + 1]}
except IndexError:
word_dict = {}
elif x == len(sentence) - 1:
word_dict = {'before' : sentence[x - 1]}
else:
word_dict = {'after' : sentence[x + 1],
'before' : sentence[x - 1]}
groups.append( {word : word_dict} )
return groups
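# A quick illustration of the grouping (sample sentence is arbitrary):
#   split_into_groups('the cat sat.') ->
#       [{'the': {'after': 'cat'}},
#        {'cat': {'after': 'sat.', 'before': 'the'}},
#        {'sat.': {'before': 'cat'}}]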
def map_words(text, words=None):
''' Maps the relationships between words in given text
text - a string of data
words - if you've already generated some word relationship, you can add to it
returns a dict mapping words to other words
'''
if words is None:
words = dict()
for sentence in sentencify(text):
sentence = split_into_groups(sentence)
for word_group in sentence:
for word, word_dict in word_group.iteritems():
if word not in words:
words[word] = Word(word)
current_word = words[word]
for key, sub_word in word_dict.iteritems():
if sub_word not in words:
words[sub_word] = Word(sub_word)
word_dict[key] = words[sub_word]
if 'after' in word_dict:
current_word.add_after(word_dict['after'])
if 'before' in word_dict:
current_word.add_before(word_dict['before'])
return words
def make_sentence(data):
'''Generates a random possible sentence based on the relationships in data
data - a word relationship dict
returns a sentence as string
'''
sentence = []
current_word = choice([word for word in data.values() if word.is_starting_word])
sentence.append(current_word)
while True:
if not current_word.afters:
break
current_word = data[choice(current_word.afters.keys())]
sentence.append(current_word)
return ' '.join(str(word) for word in sentence)
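# A minimal end-to-end sketch (sample text is illustrative):
#   words = map_words(["the cat sat. the dog sat."])
#   print make_sentence(words)    # e.g. "the cat sat."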
def make_longest_sentence(data):
    ''' Proof of concept. Aims to produce an infinitely long sentence based on the relationship data
    Prints output as it goes along - warning: infinite loop
data - a word relationship dict
'''
already_been = []
current_word = highest_connected_word((word for word in data.values() if word.is_starting_word))
while True:
        print str(current_word) + ' ',
already_been.append(current_word)
for word in already_been:
if word in current_word.afters:
current_word = word
break
else:
current_word = current_word.most_connected_after
if current_word.is_ending_word:
break
def make_longest_sentence_with_no_repeats(data):
already_been = []
none_can_be_found = False
current_word = highest_connected_word((word for word in data.values() if word.is_starting_word))
while True:
already_been.append(current_word)
possible_words = Word.sort_by_number_of_connections(current_word.afters)
for word in possible_words[::-1]:
if word not in already_been:
current_word = word
break
else:
none_can_be_found = True
if current_word.is_ending_word or none_can_be_found:
already_been.append(current_word)
break
    return ' '.join(str(word) for word in already_been)
if __name__ == '__main__':
with open('pg11.txt') as f:
data = [line for line in f if len(line.strip()) > 1]
data = map_words(data)
with open('pg42.txt') as f:
other_data = [line for line in f if len(line.strip()) > 1]
data = map_words(other_data, data)
"""with open('data.txt') as f:
other_data = [line for line in f if len(line.strip()) > 1]
data = map_words(other_data)"""
print make_sentence(data)
|
|
import Pieces
import larv
import PymunkCollisions
import XMLParser
import pymunk
import helper
from Globals import *
class LevelEngine:
"""
    Supports a playable level loaded from an XML file.
"""
def __init__(self, filename):
# Initialize the framework
self.entity_factory = Pieces.LevelEntityFactory()
self.engine = larv.Engine(self.entity_factory)
# Reset physics world
SPACE.__init__()
SPACE.gravity = GRAVITY
# Enable custom collisions
PymunkCollisions.createCollisions(self.engine)
# Parse given level
parser = XMLParser.XMLParser()
parser.importXML(filename)
parser.initiateLevel(self.entity_factory)
# Create entities
self.entity_factory.createCamera()
        # TODO, add a million ifs to check if components exist in the level and
        # then add systems dynamically depending on those components
# (only add certain systems if the requirements are met, the most
# obvious example being that the IntermitentSystem shouldn't be created
# if no intermitent lights (intermitent components) exist)
em = self.engine.entity_manager
gm = self.engine.group_manager
        ### Create systems dynamically (this needs to be revised when system
### requirements get changed!!)
# Get info on what groups does the level have
# has__group = gm.doesGroupExist('')
has_hero_group = gm.doesGroupExist('hero')
has_camera_group = gm.doesGroupExist('camera')
has_level_info_group = gm.doesGroupExist('level_info')
# Get bools about components that the current level has
has_intermitent_comp = Pieces.IntermitentComponent.__name__ in em.componentsByClass
has_level_info_comp = Pieces.LevelInfoComponent.__name__ in em.componentsByClass
has_light_comp = Pieces.LightComponent.__name__ in em.componentsByClass
has_move_comp = Pieces.MoveComponent.__name__ in em.componentsByClass
has_on_press_comp = Pieces.OnPressComponent.__name__ in em.componentsByClass
has_physics_comp = Pieces.PhysicsComponent.__name__ in em.componentsByClass
has_position_comp = Pieces.PositionComponent.__name__ in em.componentsByClass
has_render_comp = Pieces.RenderComponent.__name__ in em.componentsByClass
has_state_comp = Pieces.StateComponent.__name__ in em.componentsByClass
# Depending on which components we have, select which systems need to be
# created.
systems = []
if has_camera_group and has_hero_group and has_level_info_group and\
has_level_info_comp and has_position_comp:
systems.append((Pieces.CameraSystem(), 3))
if has_hero_group and has_position_comp and has_state_comp and has_level_info_comp:
systems.append((Pieces.GameManagerSystem(), 0))
if has_level_info_comp and has_position_comp and has_physics_comp\
and has_state_comp and has_hero_group:
systems.append((Pieces.HeroStateSystem(), 19))
if has_intermitent_comp and has_state_comp:
systems.append((Pieces.IntermitentSystem(), 1))
systems.append((Pieces.LastStepSystem(), 100)) #always needs to be there
if has_hero_group and has_move_comp and has_physics_comp\
and has_level_info_comp:
systems.append((Pieces.LevelInputSystem(), 4))
if has_light_comp and has_position_comp and has_level_info_comp \
and has_state_comp and has_camera_group and has_hero_group:
systems.append((Pieces.LightSystem(), 17))
if has_physics_comp and has_move_comp:
systems.append((Pieces.MoveSystem(), 5))
if has_physics_comp and has_position_comp:
systems.append((Pieces.PositionSystem(), 6))
if has_position_comp and has_render_comp:
systems.append((Pieces.RenderSystem(), 20))
systems.append((Pieces.ItemManagerSystem(), 0))
# # Add systems to the engine
# self.engine.addSystem(game_manager_system, 0)
# self.engine.addSystem(intermitent_system, 1)
# self.engine.addSystem(camera_system, 3)
# self.engine.addSystem(input_system, 4)
# self.engine.addSystem(move_system, 5)
# self.engine.addSystem(position_system, 6)
# self.engine.addSystem(light_system, 17)
# self.engine.addSystem(hero_state_system, 19)
# self.engine.addSystem(render_system, 20) # priority, less is before
# self.engine.addSystem(last_step_system, 100)
# Add all the systems to the engine using the priority inserted before
for system in systems:
# print(system[0])
self.engine.addSystem(system[0], system[1])
class MainMenuEngine:
"""
Supports creating a menu.
"""
def __init__(self):
self.entity_factory = Pieces.MenuEntityFactory()
self.engine = larv.Engine(self.entity_factory)
# Position variables (0,0 is botleft)
start_x = WIN_WIDTH//2
start_y = WIN_HEIGHT - 250
exit_x = WIN_WIDTH//2
exit_y = WIN_HEIGHT - 350
# Create entities
# Background
img = pygame.image.load('Images\\menu_screen.png')
self.entity_factory.createRenderObject(img, WIN_WIDTH//2, WIN_HEIGHT//2)
# Button information
button_img = pygame.image.load('Images\\button.png')
font_type = 'Images\\Fonts\\Dimbo regular.ttf'
font_size = 36
# Create start button
def start():
WORLD.push(LevelMenuEngine().engine)
function = start
self.entity_factory.createRenderObject(button_img, start_x, start_y)
self.entity_factory.createText(start_x, start_y, 'START', function=function, args=[],
type=font_type, size=font_size)
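        # Create exit button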
def raiseEndException():
raise larv.EndProgramException()
self.entity_factory.createRenderObject(button_img, exit_x, exit_y)
self.entity_factory.createText(exit_x, exit_y, 'END GAME', function=raiseEndException, args=[],
type=font_type, size=font_size)
# Create systems
menu_manager_system = Pieces.MenuManagerSystem()
render_system = Pieces.RenderSystem()
last_step_system = Pieces.LastStepSystem(physics=False)
menu_set_surface_system = Pieces.MenuSetSurfaceSystem()
menu_input_system = Pieces.MenuInputSystem()
# Add systems to the engine
self.engine.addSystem(menu_manager_system, 0)
self.engine.addSystem(menu_input_system, 5)
self.engine.addSystem(menu_set_surface_system, 15)
self.engine.addSystem(render_system, 20)
self.engine.addSystem(last_step_system, 100)
class LevelMenuEngine:
"""
Supports creating a menu.
"""
def __init__(self):
self.entity_factory = Pieces.MenuEntityFactory()
self.engine = larv.Engine(self.entity_factory)
### Create entities
# Load background of the buttons
img = pygame.image.load('Images\\level_button.png')
# Letter info
font = 'Images\\Fonts\\Dimbo Regular.ttf'
size = 24
# Set information of the position of the buttons (5 buttons per line)
# This is only for easier configuration
separation = img.get_width()*2
y_first_line = WIN_HEIGHT - 100
y_second_line = WIN_HEIGHT - 200
y_third_line = WIN_HEIGHT - 300
y_back_button = 100
l1_x = WIN_WIDTH//2 - separation*2
l1_y = y_first_line
l2_x = WIN_WIDTH//2 - separation
l2_y = y_first_line
l3_x = WIN_WIDTH//2
l3_y = y_first_line
l4_x = WIN_WIDTH//2 + separation
l4_y = y_first_line
l5_x = WIN_WIDTH//2 + separation*2
l5_y = y_first_line
#------------------
l6_x = WIN_WIDTH//2 - separation*2
l6_y = y_second_line
l7_x = WIN_WIDTH//2 - separation
l7_y = y_second_line
l8_x = WIN_WIDTH//2
l8_y = y_second_line
l9_x = WIN_WIDTH//2 + separation
l9_y = y_second_line
l10_x = WIN_WIDTH//2 + separation*2
l10_y = y_second_line
#-----------------
l11_x = WIN_WIDTH//2 - separation*2
l11_y = y_third_line
l12_x = 0
l12_y = 0
#-----------------
go_back_x = WIN_WIDTH//2
go_back_y = y_back_button
# Add all the info we have got into a list for easier creation of the
# buttons.
buttons_position = [(l1_x, l1_y), (l2_x, l2_y), (l3_x, l3_y), (l4_x, l4_y),
(l5_x, l5_y), (l6_x, l6_y), (l7_x, l7_y), (l8_x, l8_y),
(l9_x, l9_y), (l10_x, l10_y)]
# Append the back button
buttons_position.append((go_back_x, go_back_y))
# Create the background of the buttons, the button image
for position in buttons_position:
x, y = position[0], position[1]
self.entity_factory.createRenderObject(img, x, y)
# Create the texts of the level buttons, which link to the functions
# to load the levels.
def loadLevelN(level):
"""
            Checks if that level is available to play (the level before it has
            been completed). If that holds True, allows playing it.
            Level one is an exception and is always loaded.
@level: of the form 'level1'
"""
# Level1 is an exception, should be always loaded
if level == 'level1':
level = 'levels\\' + level + '.xml'
WORLD.push(LevelEngine(level).engine)
# pygame.mixer.music.stop()
return
save_file = helper.getSaveFile()
# Separate letters from numbers
level_before = ''
digits = ''
for char in level:
if char.isdigit():
digits += str(int(char))
else:
level_before += char
            # Subtract 1
digits = str(int(digits)-1)
# Form the level before
level_before += digits
# If the previous level was complete, we allow to load it
if save_file[level_before]['complete']:
level = 'levels\\' + level + '.xml'
WORLD.push(LevelEngine(level).engine)
# pygame.mixer.music.stop()
else: #we don't allow it
print('Can\'t be allowed')
for n, position in enumerate(buttons_position[:-1], start=1):
x, y = position[0], position[1]
level = 'level' + str(n)
self.entity_factory.createText(x,y, str(n), function=loadLevelN,
args=[level], size=size, type=font)
# Create the go back text
def goBack():
WORLD.pop()
self.entity_factory.createText(go_back_x, go_back_y, 'BACK',
function=goBack, args=[], size=size,
type=font)
# Create systems
menu_manager_system = Pieces.MenuManagerSystem()
render_system = Pieces.RenderSystem()
last_step_system = Pieces.LastStepSystem(physics=False)
menu_set_surface_system = Pieces.MenuSetSurfaceSystem()
menu_input_system = Pieces.MenuInputSystem()
# Add systems
self.engine.addSystem(menu_manager_system, 0)
self.engine.addSystem(menu_input_system, 5)
self.engine.addSystem(menu_set_surface_system, 15)
self.engine.addSystem(render_system, 20)
self.engine.addSystem(last_step_system, 100)
class StartMenuEngine:
"""
Supports creating a menu.
"""
def __init__(self):
self.entity_factory = Pieces.MenuEntityFactory()
self.engine = larv.Engine(self.entity_factory)
img = pygame.image.load('Images\\start_screen.png')
self.entity_factory.createRenderObject(img,WIN_WIDTH//2,WIN_HEIGHT//2)
render_system = Pieces.RenderSystem()
last_step_system = Pieces.LastStepSystem(physics=False)
menu_input_system = Pieces.StartMenuInputSystem()
self.engine.addSystem(menu_input_system, 5)
self.engine.addSystem(render_system, 10)
self.engine.addSystem(last_step_system, 15)
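# A minimal bootstrap sketch (illustrative): WORLD is the engine stack used by
# this module via push()/pop(); running the pushed engine is handled elsewhere
# by the larv framework.
#   WORLD.push(StartMenuEngine().engine)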
|
|
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
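# Quick numerical check of the formulas above: _sym_ortho(3.0, 4.0) returns
# (0.6, 0.8, 5.0), and indeed c*a + s*b == r while s*a - c*b == 0.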
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
iter_lim=None, show=False, calc_var=False, x0=None):
"""Find the least-squares solution to a large, sparse, linear system
of equations.
The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or
``min ||Ax - b||^2 + d^2 ||x||^2``.
The matrix A may be square or rectangular (over-determined or
under-determined), and may have any rank.
::
1. Unsymmetric equations -- solve A*x = b
2. Linear least squares -- solve A*x = b
in the least-squares sense
3. Damped least squares -- solve ( A )*x = ( b )
( damp*I ) ( 0 )
in the least-squares sense
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
Representation of an m-by-n matrix. It is required that
the linear operator can produce ``Ax`` and ``A^T x``.
b : array_like, shape (m,)
Right-hand side vector ``b``.
damp : float
Damping coefficient.
atol, btol : float, optional
Stopping tolerances. If both are 1.0e-9 (say), the final
residual norm should be accurate to about 9 digits. (The
final x will usually have fewer correct digits, depending on
cond(A) and the size of damp.)
conlim : float, optional
Another stopping tolerance. lsqr terminates if an estimate of
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
b``, `conlim` could be as large as 1.0e+12 (say). For
least-squares problems, conlim should be less than 1.0e+8.
Maximum precision can be obtained by setting ``atol = btol =
conlim = zero``, but the number of iterations may then be
excessive.
iter_lim : int, optional
Explicit limitation on number of iterations (for safety).
show : bool, optional
Display an iteration log.
calc_var : bool, optional
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
x0 : array_like, shape (n,), optional
        Initial guess of x; if None, zeros are used.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
The final solution.
istop : int
Gives the reason for termination.
1 means x is an approximate solution to Ax = b.
2 means x approximately solves the least-squares problem.
itn : int
Iteration number upon termination.
r1norm : float
``norm(r)``, where ``r = b - Ax``.
r2norm : float
``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
``damp == 0``.
anorm : float
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
acond : float
Estimate of ``cond(Abar)``.
arnorm : float
Estimate of ``norm(A'*r - damp^2*x)``.
xnorm : float
``norm(x)``
var : ndarray of float
If ``calc_var`` is True, estimates all diagonals of
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
damp^2*I)^{-1}``. This is well defined if A has full column
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
< n`` and ``damp = 0.``)
Notes
-----
LSQR uses an iterative method to approximate the solution. The
number of iterations required to reach a certain accuracy depends
strongly on the scaling of the problem. Poor scaling of the rows
or columns of A should therefore be avoided where possible.
For example, in problem 1 the solution is unaltered by
row-scaling. If a row of A is very small or large compared to
the other rows of A, the corresponding row of ( A b ) should be
scaled up or down.
In problems 1 and 2, the solution x is easily recovered
following column-scaling. Unless better information is known,
the nonzero columns of A should be scaled so that they all have
the same Euclidean norm (e.g., 1.0).
In problem 3, there is no freedom to re-scale if damp is
nonzero. However, the value of damp should be assigned only
after attention has been paid to the scaling of A.
The parameter damp is intended to help regularize
ill-conditioned systems, by preventing the true solution from
being very large. Another aid to regularization is provided by
the parameter acond, which may be used to terminate iterations
before the computed solution becomes very large.
If some initial estimate ``x0`` is known and if ``damp == 0``,
one could proceed as follows:
1. Compute a residual vector ``r0 = b - A*x0``.
2. Use LSQR to solve the system ``A*dx = r0``.
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
This requires that ``x0`` be available before and after the call
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
to solve A*x = b and k2 iterations to solve A*dx = r0.
If x0 is "good", norm(r0) will be smaller than norm(b).
If the same stopping tolerances atol and btol are used for each
system, k1 and k2 will be similar, but the final solution x0 + dx
should be more accurate. The only way to reduce the total work
is to use a larger stopping tolerance for the second system.
If some value btol is suitable for A*x = b, the larger value
btol*norm(b)/norm(r0) should be suitable for A*dx = r0.
Preconditioning is another way to reduce the number of iterations.
If it is possible to solve a related system ``M*x = b``
efficiently, where M approximates A in some helpful way (e.g. M -
A has low rank or its elements are small relative to those of A),
LSQR may converge more rapidly on the system ``A*M(inverse)*z =
b``, after which x can be recovered by solving M*x = z.
If A is symmetric, LSQR should not be used!
Alternatives are the symmetric conjugate-gradient method (cg)
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
applies to any symmetric A and will converge more rapidly than
LSQR. If A is positive definite, there are other implementations
of symmetric cg that require slightly less work per iteration than
SYMMLQ (but will take the same number of iterations).
References
----------
.. [1] C. C. Paige and M. A. Saunders (1982a).
"LSQR: An algorithm for sparse linear equations and
sparse least squares", ACM TOMS 8(1), 43-71.
.. [2] C. C. Paige and M. A. Saunders (1982b).
"Algorithm 583. LSQR: Sparse linear equations and least
squares problems", ACM TOMS 8(2), 195-209.
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
systems using LSQR and CRAIG", BIT 35, 588-604.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsqr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution `[0, 0]`
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsqr(A, b)[:4]
The exact solution is x = 0
>>> istop
0
>>> x
array([ 0., 0.])
The stopping code `istop=0` returned indicates that a vector of zeros was
found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> r1norm
4.440892098500627e-16
As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
limits. The given solution `[1., -1.]` obviously solves the equation. The
remaining return values include information about the number of iterations
(`itn=1`) and the remaining difference of left and right side of the solved
equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> r1norm
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `r1norm`
contains the norm of the minimal residual that was found.
"""
A = aslinearoperator(A)
b = np.atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
m, n = A.shape
if iter_lim is None:
iter_lim = 2 * n
var = np.zeros(n)
msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
if show:
print(' ')
print('LSQR Least-squares solution of Ax = b')
str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)
str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
print(str1)
print(str2)
print(str3)
print(str4)
itn = 0
istop = 0
ctol = 0
if conlim > 0:
ctol = 1/conlim
anorm = 0
acond = 0
dampsq = damp**2
ddnorm = 0
res2 = 0
xnorm = 0
xxnorm = 0
z = 0
cs2 = -1
sn2 = 0
"""
Set up the first vectors u and v for the bidiagonalization.
These satisfy beta*u = b - A*x, alfa*v = A'*u.
"""
u = b
bnorm = np.linalg.norm(b)
if x0 is None:
x = np.zeros(n)
beta = bnorm.copy()
else:
x = np.asarray(x0)
u = u - A.matvec(x)
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
v = A.rmatvec(u)
alfa = np.linalg.norm(v)
else:
v = x.copy()
alfa = 0
if alfa > 0:
v = (1/alfa) * v
w = v.copy()
rhobar = alfa
phibar = beta
rnorm = beta
r1norm = rnorm
r2norm = rnorm
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
arnorm = alfa * beta
if arnorm == 0:
print(msg[0])
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
head1 = ' Itn x[0] r1norm r2norm '
head2 = ' Compatible LS Norm A Cond A'
if show:
print(' ')
print(head1, head2)
test1 = 1
test2 = alfa / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(str1, str2, str3)
# Main iteration loop.
while itn < iter_lim:
itn = itn + 1
"""
% Perform the next step of the bidiagonalization to obtain the
% next beta, u, alfa, v. These satisfy the relations
% beta*u = a*v - alfa*u,
% alfa*v = A'*u - beta*v.
"""
u = A.matvec(v) - alfa * u
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
v = A.rmatvec(u) - beta * v
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1 / alfa) * v
# Use a plane rotation to eliminate the damping parameter.
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
rhobar1 = sqrt(rhobar**2 + damp**2)
cs1 = rhobar / rhobar1
sn1 = damp / rhobar1
psi = sn1 * phibar
phibar = cs1 * phibar
# Use a plane rotation to eliminate the subdiagonal element (beta)
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
cs, sn, rho = _sym_ortho(rhobar1, beta)
theta = sn * alfa
rhobar = -cs * alfa
phi = cs * phibar
phibar = sn * phibar
tau = sn * phi
# Update x and w.
t1 = phi / rho
t2 = -theta / rho
dk = (1 / rho) * w
x = x + t1 * w
w = v + t2 * w
ddnorm = ddnorm + np.linalg.norm(dk)**2
if calc_var:
var = var + dk**2
# Use a plane rotation on the right to eliminate the
# super-diagonal element (theta) of the upper-bidiagonal matrix.
# Then use the result to estimate norm(x).
delta = sn2 * rho
gambar = -cs2 * rho
rhs = phi - delta * z
zbar = rhs / gambar
xnorm = sqrt(xxnorm + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar / gamma
sn2 = theta / gamma
z = rhs / gamma
xxnorm = xxnorm + z**2
# Test for convergence.
# First, estimate the condition of the matrix Abar,
# and the norms of rbar and Abar'rbar.
acond = anorm * sqrt(ddnorm)
res1 = phibar**2
res2 = res2 + psi**2
rnorm = sqrt(res1 + res2)
arnorm = alfa * abs(tau)
# Distinguish between
# r1norm = ||b - Ax|| and
# r2norm = rnorm in current code
# = sqrt(r1norm^2 + damp^2*||x||^2).
# Estimate r1norm from
# r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
# Although there is cancellation, it might be accurate enough.
r1sq = rnorm**2 - dampsq * xxnorm
r1norm = sqrt(abs(r1sq))
if r1sq < 0:
r1norm = -r1norm
r2norm = rnorm
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = rnorm / bnorm
test2 = arnorm / (anorm * rnorm + eps)
test3 = 1 / (acond + eps)
t1 = test1 / (1 + anorm * xnorm / bnorm)
rtol = btol + atol * anorm * xnorm / bnorm
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= iter_lim:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= iter_lim-10:
prnt = True
# if itn%10 == 0: prnt = True
if test3 <= 2*ctol:
prnt = True
if test2 <= 10*atol:
prnt = True
if test1 <= 10*rtol:
prnt = True
if istop != 0:
prnt = True
if prnt:
if show:
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (anorm, acond)
print(str1, str2, str3, str4)
if istop != 0:
break
# End of iteration loop.
# Print the stopping condition.
if show:
print(' ')
print('LSQR finished')
print(msg[istop])
print(' ')
str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
print(str1 + ' ' + str2)
print(str3 + ' ' + str4)
print(' ')
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
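# A minimal warm-start sketch for the procedure described in the Notes above
# (A, b and an initial guess x0 are assumed to be defined by the caller):
#   r0 = b - A * x0                    # residual of the initial guess
#   dx = lsqr(A, r0)[0]                # solve A*dx = r0
#   x = x0 + dx                        # corrected solution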
|
|
"""
fs.s3fs
=======
**Currently only available on Python2 due to boto not being available for Python3**
FS subclass accessing files in Amazon S3
This module provides the class 'S3FS', which implements the FS filesystem
interface for objects stored in Amazon Simple Storage Service (S3).
"""
import os
import time
import threading
import datetime
import tempfile
from fnmatch import fnmatch
import stat as statinfo
import boto.s3.connection
from boto.s3.prefix import Prefix
from boto.exception import S3ResponseError
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.remote import *
from fs.filelike import LimitBytesFile
from fs import iotools
import six
# Boto is not thread-safe, so we need to use a per-thread S3 connection.
if hasattr(threading,"local"):
thread_local = threading.local
else:
class thread_local(object):
def __init__(self):
self._map = {}
def __getattr__(self,attr):
try:
return self._map[(threading.currentThread(),attr)]
except KeyError:
raise AttributeError, attr
def __setattr__(self,attr,value):
self._map[(threading.currentThread(),attr)] = value
class S3FS(FS):
"""A filesystem stored in Amazon S3.
This class provides the FS interface for files stored in Amazon's Simple
Storage Service (S3). It should be instantiated with the name of the
S3 bucket to use, and optionally a prefix under which the files should
be stored.
Local temporary files are used when opening files from this filesystem,
and any changes are only pushed back into S3 when the files are closed
or flushed.
"""
_meta = {'thread_safe': True,
'virtual': False,
'read_only': False,
'unicode_paths': True,
'case_insensitive_paths': False,
'network': True,
'atomic.move': True,
'atomic.copy': True,
'atomic.makedir': True,
'atomic.rename': False,
'atomic.setcontent': True
}
class meta:
PATH_MAX = None
NAME_MAX = None
def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True, key_sync_timeout=1):
"""Constructor for S3FS objects.
S3FS objects require the name of the S3 bucket in which to store
files, and can optionally be given a prefix under which the files
should be stored. The AWS public and private keys may be specified
as additional arguments; if they are not specified they will be
read from the two environment variables AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY.
The keyword argument 'key_sync_timeout' specifies the maximum
time in seconds that the filesystem will spend trying to confirm
that a newly-uploaded S3 key is available for reading. For no
timeout set it to zero. To disable these checks entirely (and
thus reduce the filesystem's consistency guarantees to those of
S3's "eventual consistency" model) set it to None.
By default the path separator is "/", but this can be overridden
by specifying the keyword 'separator' in the constructor.
"""
self._bucket_name = bucket
self._access_keys = (aws_access_key,aws_secret_key)
self._separator = separator
self._key_sync_timeout = key_sync_timeout
# Normalise prefix to this form: path/to/files/
prefix = normpath(prefix)
while prefix.startswith(separator):
prefix = prefix[1:]
if not prefix.endswith(separator) and prefix != "":
prefix = prefix + separator
if isinstance(prefix,unicode):
prefix = prefix.encode("utf8")
self._prefix = prefix
self._tlocal = thread_local()
super(S3FS, self).__init__(thread_synchronize=thread_synchronize)
# Make _s3conn and _s3bukt properties that are created on demand,
# since they cannot be stored during pickling.
def _s3conn(self):
try:
(c,ctime) = self._tlocal.s3conn
if time.time() - ctime > 60:
raise AttributeError
return c
except AttributeError:
c = boto.s3.connection.S3Connection(*self._access_keys)
self._tlocal.s3conn = (c,time.time())
return c
_s3conn = property(_s3conn)
def _s3bukt(self):
try:
(b,ctime) = self._tlocal.s3bukt
if time.time() - ctime > 60:
raise AttributeError
return b
except AttributeError:
try:
# Validate by listing the bucket if there is no prefix.
# If there is a prefix, validate by listing only the prefix
# itself, to avoid errors when an IAM policy has been applied.
if self._prefix:
b = self._s3conn.get_bucket(self._bucket_name, validate=0)
b.get_key(self._prefix)
else:
b = self._s3conn.get_bucket(self._bucket_name, validate=1)
except S3ResponseError, e:
if "404 Not Found" not in str(e):
raise
b = self._s3conn.create_bucket(self._bucket_name)
self._tlocal.s3bukt = (b,time.time())
return b
_s3bukt = property(_s3bukt)
def __getstate__(self):
state = super(S3FS,self).__getstate__()
del state['_tlocal']
return state
def __setstate__(self,state):
super(S3FS,self).__setstate__(state)
self._tlocal = thread_local()
def __repr__(self):
args = (self.__class__.__name__,self._bucket_name,self._prefix)
return '<%s: %s:%s>' % args
__str__ = __repr__
def _s3path(self,path):
"""Get the absolute path to a file stored in S3."""
path = relpath(normpath(path))
path = self._separator.join(iteratepath(path))
s3path = self._prefix + path
if s3path and s3path[-1] == self._separator:
s3path = s3path[:-1]
if isinstance(s3path,unicode):
s3path = s3path.encode("utf8")
return s3path
def _uns3path(self,s3path,roots3path=None):
"""Get the local path for a file stored in S3.
This is essentially the opposite of self._s3path().
"""
if roots3path is None:
roots3path = self._s3path("")
i = len(roots3path)
return s3path[i:]
def _sync_key(self,k):
"""Synchronise on contents of the given key.
Since S3 only offers "eventual consistency" of data, it is possible
to create a key but be unable to read it back straight away. This
method works around that limitation by polling the key until it reads
back the value expected by the given key.
Note that this could easily fail if the key is modified by another
program, meaning the content will never be as specified in the given
        key. This is the reason for the timeout argument to the constructor.
"""
timeout = self._key_sync_timeout
if timeout is None:
return k
k2 = self._s3bukt.get_key(k.name)
t = time.time()
while k2 is None or k2.etag != k.etag:
if timeout > 0:
if t + timeout < time.time():
break
time.sleep(0.1)
k2 = self._s3bukt.get_key(k.name)
return k2
def _sync_set_contents(self,key,contents):
"""Synchronously set the contents of a key."""
if isinstance(key,basestring):
key = self._s3bukt.new_key(key)
if isinstance(contents,basestring):
key.set_contents_from_string(contents)
elif hasattr(contents,"md5"):
hexmd5 = contents.md5
b64md5 = hexmd5.decode("hex").encode("base64").strip()
key.set_contents_from_file(contents,md5=(hexmd5,b64md5))
else:
try:
contents.seek(0)
except (AttributeError,EnvironmentError):
tf = tempfile.TemporaryFile()
data = contents.read(524288)
while data:
tf.write(data)
data = contents.read(524288)
tf.seek(0)
key.set_contents_from_file(tf)
else:
key.set_contents_from_file(contents)
return self._sync_key(key)
def makepublic(self, path):
"""Mark given path as publicly accessible using HTTP(S)"""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
k.make_public()
def getpathurl(self, path, allow_none=False, expires=3600):
"""Returns a url that corresponds to the given path."""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
# Is there AllUsers group with READ permissions?
is_public = True in [grant.permission == 'READ' and
grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
for grant in k.get_acl().acl.grants]
url = k.generate_url(expires, force_http=is_public)
        if url is None:
if not allow_none:
raise NoPathURLError(path=path)
return None
if is_public:
# Strip time token; it has no sense for public resource
url = url.split('?')[0]
return url
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
s3path = self._s3path(path)
if isinstance(data, six.text_type):
data = data.encode(encoding=encoding, errors=errors)
self._sync_set_contents(s3path, data)
@iotools.filelike_to_stream
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
"""Open the named file in the given mode.
This method downloads the file contents into a local temporary file
so that it can be worked on efficiently. Any changes made to the
file are only sent back to S3 when the file is flushed or closed.
"""
if self.isdir(path):
raise ResourceInvalidError(path)
s3path = self._s3path(path)
# Truncate the file if requested
if "w" in mode:
k = self._sync_set_contents(s3path,"")
else:
k = self._s3bukt.get_key(s3path)
if k is None:
# Create the file if it's missing
if "w" not in mode and "a" not in mode:
raise ResourceNotFoundError(path)
if not self.isdir(dirname(path)):
raise ParentDirectoryMissingError(path)
k = self._sync_set_contents(s3path,"")
# Make sure nothing tries to read past end of socket data
f = LimitBytesFile(k.size,k,"r")
# For streaming reads, return the key object directly
if mode == "r-":
return f
# For everything else, use a RemoteFileBuffer.
# This will take care of closing the socket when it's done.
return RemoteFileBuffer(self,path,mode,f)
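    # A minimal usage sketch (bucket name, path and credentials are illustrative;
    # keys fall back to AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY if not given):
    #   s3fs = S3FS("my-bucket", prefix="backups/")
    #   s3fs.setcontents("notes.txt", "hello world")
    #   print s3fs.open("notes.txt").read()   # -> 'hello world'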
def exists(self,path):
"""Check whether a path exists."""
s3path = self._s3path(path)
s3pathD = s3path + self._separator
# The root directory always exists
if self._prefix.startswith(s3path):
return True
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
for k in ks:
# A regular file
if _eq_utf8(k.name,s3path):
return True
# A directory
if _eq_utf8(k.name,s3pathD):
return True
return False
def isdir(self,path):
"""Check whether a path exists and is a directory."""
s3path = self._s3path(path) + self._separator
# Root is always a directory
if s3path == "/" or s3path == self._prefix:
return True
# Use a list request so that we return true if there are any files
# in that directory. This avoids requiring a special file for the
# the directory itself, which other tools may not create.
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
try:
iter(ks).next()
except StopIteration:
return False
else:
return True
def isfile(self,path):
"""Check whether a path exists and is a regular file."""
s3path = self._s3path(path)
# Root is never a file
if self._prefix.startswith(s3path):
return False
k = self._s3bukt.get_key(s3path)
if k is not None:
return True
return False
def listdir(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
"""List contents of a directory."""
return list(self.ilistdir(path,wildcard,full,absolute,
dirs_only,files_only))
def listdirinfo(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
return list(self.ilistdirinfo(path,wildcard,full,absolute,
dirs_only,files_only))
def ilistdir(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
"""List contents of a directory."""
keys = self._iter_keys(path)
entries = self._filter_keys(path,keys,wildcard,full,absolute,
dirs_only,files_only)
return (nm for (nm,k) in entries)
def ilistdirinfo(self,path="./",wildcard=None,full=False,absolute=False,
dirs_only=False,files_only=False):
keys = self._iter_keys(path)
entries = self._filter_keys(path,keys,wildcard,full,absolute,
dirs_only,files_only)
return ((nm,self._get_key_info(k,nm)) for (nm,k) in entries)
def _iter_keys(self,path):
"""Iterator over keys contained in the given directory.
This generator yields (name,key) pairs for each entry in the given
        directory. If the path is not a directory, it raises the appropriate
error.
"""
s3path = self._s3path(path) + self._separator
if s3path == "/":
s3path = ""
isDir = False
for k in self._s3bukt.list(prefix=s3path,delimiter=self._separator):
if not isDir:
isDir = True
# Skip over the entry for the directory itself, if it exists
name = self._uns3path(k.name,s3path)
if name != "":
if not isinstance(name,unicode):
name = name.decode("utf8")
if name.endswith(self._separator):
name = name[:-1]
yield (name,k)
if not isDir:
if s3path != self._prefix:
if self.isfile(path):
msg = "that's not a directory: %(path)s"
raise ResourceInvalidError(path,msg=msg)
raise ResourceNotFoundError(path)
def _key_is_dir(self, k):
if isinstance(k,Prefix):
return True
if k.name.endswith(self._separator):
return True
return False
def _filter_keys(self,path,keys,wildcard,full,absolute,
dirs_only,files_only):
"""Filter out keys not matching the given criteria.
Given a (name,key) iterator as returned by _iter_keys, this method
applies the given filtering criteria and returns a filtered iterator.
"""
sep = self._separator
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
if dirs_only:
keys = ((nm,k) for (nm,k) in keys if self._key_is_dir(k))
elif files_only:
keys = ((nm,k) for (nm,k) in keys if not self._key_is_dir(k))
if wildcard is not None:
if callable(wildcard):
keys = ((nm,k) for (nm,k) in keys if wildcard(nm))
else:
keys = ((nm,k) for (nm,k) in keys if fnmatch(nm,wildcard))
if full:
return ((relpath(pathjoin(path, nm)),k) for (nm,k) in keys)
elif absolute:
return ((abspath(pathjoin(path, nm)),k) for (nm,k) in keys)
return keys
def makedir(self,path,recursive=False,allow_recreate=False):
"""Create a directory at the given path.
        If 'recursive' is True, any missing parent directories are created as
        well. If 'allow_recreate' is True, re-creating an existing directory
        is not an error.
        """
s3path = self._s3path(path)
s3pathD = s3path + self._separator
if s3pathD == self._prefix:
if allow_recreate:
return
msg = "Can not create a directory that already exists"\
" (try allow_recreate=True): %(path)s"
raise DestinationExistsError(path, msg=msg)
s3pathP = self._s3path(dirname(path))
if s3pathP:
s3pathP = s3pathP + self._separator
# Check various preconditions using list of parent dir
ks = self._s3bukt.list(prefix=s3pathP,delimiter=self._separator)
if s3pathP == self._prefix:
parentExists = True
else:
parentExists = False
for k in ks:
if not parentExists:
parentExists = True
if _eq_utf8(k.name,s3path):
# It's already a file
msg = "Destination exists as a regular file: %(path)s"
raise ResourceInvalidError(path, msg=msg)
if _eq_utf8(k.name,s3pathD):
# It's already a directory
if allow_recreate:
return
msg = "Can not create a directory that already exists"\
" (try allow_recreate=True): %(path)s"
raise DestinationExistsError(path, msg=msg)
# Create parent if required
if not parentExists:
if recursive:
self.makedir(dirname(path),recursive,allow_recreate)
else:
msg = "Parent directory does not exist: %(path)s"
raise ParentDirectoryMissingError(path, msg=msg)
# Create an empty file representing the directory
self._sync_set_contents(s3pathD,"")
def remove(self,path):
"""Remove the file at the given path."""
s3path = self._s3path(path)
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
for k in ks:
if _eq_utf8(k.name,s3path):
break
if _startswith_utf8(k.name,s3path + "/"):
msg = "that's not a file: %(path)s"
raise ResourceInvalidError(path,msg=msg)
else:
raise ResourceNotFoundError(path)
self._s3bukt.delete_key(s3path)
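        # Editor's note on the loop below: the key is re-fetched until the delete
        # becomes visible, presumably to wait out S3's eventual consistency.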
k = self._s3bukt.get_key(s3path)
while k:
k = self._s3bukt.get_key(s3path)
def removedir(self,path,recursive=False,force=False):
"""Remove the directory at the given path."""
if normpath(path) in ('', '/'):
raise RemoveRootError(path)
s3path = self._s3path(path)
if s3path != self._prefix:
s3path = s3path + self._separator
if force:
# If we will be forcibly removing any directory contents, we
# might as well get the un-delimited list straight away.
ks = self._s3bukt.list(prefix=s3path)
else:
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
# Fail if the directory is not empty, or remove them if forced
found = False
for k in ks:
found = True
if not _eq_utf8(k.name,s3path):
if not force:
raise DirectoryNotEmptyError(path)
self._s3bukt.delete_key(k.name)
if not found:
if self.isfile(path):
msg = "removedir() called on a regular file: %(path)s"
raise ResourceInvalidError(path,msg=msg)
if path not in ("","/"):
raise ResourceNotFoundError(path)
self._s3bukt.delete_key(s3path)
if recursive and path not in ("","/"):
pdir = dirname(path)
try:
self.removedir(pdir,recursive=True,force=False)
except DirectoryNotEmptyError:
pass
def rename(self,src,dst):
"""Rename the file at 'src' to 'dst'."""
# Actually, in S3 'rename' is exactly the same as 'move'
if self.isfile(src):
self.move(src,dst)
else:
self.movedir(src,dst)
def getinfo(self,path):
s3path = self._s3path(path)
if path in ("","/"):
k = Prefix(bucket=self._s3bukt,name="/")
else:
k = self._s3bukt.get_key(s3path)
if k is None:
ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
for k in ks:
if isinstance(k,Prefix):
break
else:
raise ResourceNotFoundError(path)
return self._get_key_info(k,path)
def _get_key_info(self,key,name=None):
info = {}
if name is not None:
info["name"] = basename(name)
else:
info["name"] = basename(self._uns3key(k.name))
if self._key_is_dir(key):
info["st_mode"] = 0700 | statinfo.S_IFDIR
else:
info["st_mode"] = 0700 | statinfo.S_IFREG
if hasattr(key,"size"):
info['size'] = int(key.size)
etag = getattr(key,"etag",None)
if etag is not None:
if isinstance(etag,unicode):
etag = etag.encode("utf8")
info['etag'] = etag.strip('"').strip("'")
if hasattr(key,"last_modified"):
# TODO: does S3 use any other formats?
fmt = "%a, %d %b %Y %H:%M:%S %Z"
try:
mtime = datetime.datetime.strptime(key.last_modified,fmt)
info['modified_time'] = mtime
except ValueError:
pass
return info
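    # Illustrative shape of the dict returned by _get_key_info (values are hypothetical):
    #   {'name': 'report.txt', 'st_mode': 0700 | statinfo.S_IFREG, 'size': 1024,
    #    'etag': 'd41d8cd98f00b204e9800998ecf8427e',
    #    'modified_time': datetime.datetime(2013, 1, 1, 12, 0, 0)}
    # 'size', 'etag' and 'modified_time' only appear when the key provides them.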
def desc(self,path):
return "No description available"
def copy(self,src,dst,overwrite=False,chunk_size=16384):
"""Copy a file from 'src' to 'dst'.
src -- The source path
dst -- The destination path
overwrite -- If True, then the destination may be overwritten
(if a file exists at that location). If False then an exception will be
thrown if the destination exists
chunk_size -- Size of chunks to use in copy (ignored by S3)
"""
s3path_dst = self._s3path(dst)
s3path_dstD = s3path_dst + self._separator
# Check for various preconditions.
ks = self._s3bukt.list(prefix=s3path_dst,delimiter=self._separator)
dstOK = False
for k in ks:
# It exists as a regular file
if _eq_utf8(k.name,s3path_dst):
if not overwrite:
raise DestinationExistsError(dst)
dstOK = True
break
# Check if it refers to a directory. If so, we copy *into* it.
# Since S3 lists in lexicographic order, subsequent iterations
# of the loop will check for the existence of the new filename.
if _eq_utf8(k.name,s3path_dstD):
nm = basename(src)
dst = pathjoin(dirname(dst),nm)
s3path_dst = s3path_dstD + nm
dstOK = True
if not dstOK and not self.isdir(dirname(dst)):
msg = "Destination directory does not exist: %(path)s"
raise ParentDirectoryMissingError(dst,msg=msg)
# OK, now we can copy the file.
s3path_src = self._s3path(src)
try:
self._s3bukt.copy_key(s3path_dst,self._bucket_name,s3path_src)
except S3ResponseError, e:
if "404 Not Found" in str(e):
msg = "Source is not a file: %(path)s"
raise ResourceInvalidError(src, msg=msg)
raise e
else:
k = self._s3bukt.get_key(s3path_dst)
while k is None:
k = self._s3bukt.get_key(s3path_dst)
self._sync_key(k)
def move(self,src,dst,overwrite=False,chunk_size=16384):
"""Move a file from one location to another."""
self.copy(src,dst,overwrite=overwrite)
self._s3bukt.delete_key(self._s3path(src))
def walkfiles(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False ):
if search != "breadth" or dir_wildcard is not None:
args = (wildcard,dir_wildcard,search,ignore_errors)
for item in super(S3FS,self).walkfiles(path,*args):
yield item
else:
prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix))
if name != "":
if not isinstance(name,unicode):
name = name.decode("utf8")
if not k.name.endswith(self._separator):
if wildcard is not None:
if callable(wildcard):
if not wildcard(basename(name)):
continue
else:
if not fnmatch(basename(name),wildcard):
continue
yield pathjoin(path,name)
def walkinfo(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False ):
if search != "breadth" or dir_wildcard is not None:
args = (wildcard,dir_wildcard,search,ignore_errors)
for item in super(S3FS,self).walkfiles(path,*args):
yield (item,self.getinfo(item))
else:
prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix))
if name != "":
if not isinstance(name,unicode):
name = name.decode("utf8")
if wildcard is not None:
if callable(wildcard):
if not wildcard(basename(name)):
continue
else:
if not fnmatch(basename(name),wildcard):
continue
yield (pathjoin(path,name),self._get_key_info(k,name))
def walkfilesinfo(self,
path="/",
wildcard=None,
dir_wildcard=None,
search="breadth",
ignore_errors=False ):
if search != "breadth" or dir_wildcard is not None:
args = (wildcard,dir_wildcard,search,ignore_errors)
for item in super(S3FS,self).walkfiles(path,*args):
yield (item,self.getinfo(item))
else:
prefix = self._s3path(path)
for k in self._s3bukt.list(prefix=prefix):
name = relpath(self._uns3path(k.name,prefix))
if name != "":
if not isinstance(name,unicode):
name = name.decode("utf8")
if not k.name.endswith(self._separator):
if wildcard is not None:
if callable(wildcard):
if not wildcard(basename(name)):
continue
else:
if not fnmatch(basename(name),wildcard):
continue
yield (pathjoin(path,name),self._get_key_info(k,name))
def _eq_utf8(name1,name2):
if isinstance(name1,unicode):
name1 = name1.encode("utf8")
if isinstance(name2,unicode):
name2 = name2.encode("utf8")
return name1 == name2
def _startswith_utf8(name1,name2):
if isinstance(name1,unicode):
name1 = name1.encode("utf8")
if isinstance(name2,unicode):
name2 = name2.encode("utf8")
return name1.startswith(name2)
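# A minimal usage sketch (defined but never called): it exercises only the methods
# shown above. The S3FS constructor arguments are an assumption based on the usual
# fs.s3fs signature, and the object keys are hypothetical; adapt them to your bucket.
def _s3fs_usage_sketch(bucket_name="my-bucket"):
    """Illustrative walkthrough of the directory/file operations defined above."""
    fs = S3FS(bucket_name)                      # assumed constructor: S3FS(bucket, ...)
    fs.makedir("reports", allow_recreate=True)  # writes an empty 'reports/' marker key
    fs.makedir("backup", allow_recreate=True)
    print fs.isdir("reports"), fs.isfile("reports")
    # listdir() filtering goes through _filter_keys(); wildcard/dirs_only/files_only apply
    print fs.listdir("reports", files_only=True, wildcard="*.txt")
    # copy() assumes 'reports/a.txt' already exists in the bucket
    fs.copy("reports/a.txt", "backup/a.txt", overwrite=True)
    print fs.getinfo("backup/a.txt")
    fs.remove("backup/a.txt")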
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_flatson
----------------------------------
Tests for `flatson` module.
"""
from __future__ import unicode_literals, print_function, absolute_import
import json
import os
import skinfer
import unittest
from flatson import Flatson
import tempfile
EMPTY_SCHEMA = skinfer.generate_schema({})
SIMPLE_SCHEMA = skinfer.generate_schema({'a_prop': ''})
LIST_SCHEMA = skinfer.generate_schema([])
SAMPLE_WITH_LIST_OF_OBJECTS = {
'first': 'hello',
'list': [{'key1': 'value1', 'key2': 'value2'}, {'key1': 'value3', 'key2': 'value4'}]
}
SAMPLE_WITH_LIST_OF_TUPLES = {
'first': 'hello',
'list': [['value1', 'value2'], ['value3', 'value4']]
}
class TestFlatson(unittest.TestCase):
def test_create(self):
f = Flatson(schema=SIMPLE_SCHEMA)
assert f.schema == SIMPLE_SCHEMA
def test_create_from_schemafile(self):
        fd, fname = tempfile.mkstemp()
        os.close(fd)  # close the low-level handle; the file is reopened by name below
try:
with open(fname, 'w') as f:
json.dump(SIMPLE_SCHEMA, f)
obj = Flatson.from_schemafile(fname)
self.assertEquals(SIMPLE_SCHEMA, obj.schema)
finally:
os.remove(fname)
def test_no_support_for_list_objects(self):
with self.assertRaises(ValueError):
Flatson(schema=LIST_SCHEMA)
def test_when_no_declared_properties_flatten_empty_list(self):
f = Flatson(schema=EMPTY_SCHEMA)
result = f.flatten({'a_prop': 'a_value'})
self.assertEquals([], result)
def test_convert_simple_objects(self):
f = Flatson(schema=SIMPLE_SCHEMA)
self.assertEquals(['a_prop'], f.fieldnames)
self.assertEquals(['a_value'], f.flatten({'a_prop': 'a_value'}))
self.assertEquals([None], f.flatten({}))
def test_convert_nested_objects(self):
contain_nested_object = {
'first': 'hello',
'second': {
'one': 1,
'two': 2,
}
}
schema = skinfer.generate_schema(contain_nested_object)
f = Flatson(schema=schema)
self.assertEquals(['first', 'second.one', 'second.two'], f.fieldnames)
self.assertEquals(['hello', 1, 2], f.flatten(contain_nested_object))
def test_flatten_dict(self):
contain_nested_object = {
'first': 'hello',
'second': {
'one': 1,
'two': 2,
}
}
schema = skinfer.generate_schema(contain_nested_object)
f = Flatson(schema=schema)
expected = {'first': 'hello', 'second.one': 1, 'second.two': 2}
self.assertEquals(expected, f.flatten_dict(contain_nested_object))
def test_convert_deep_nested_objects(self):
contain_nested_object = {
'first': 'hello',
'second': {
'one': {
'a': 1,
'b': 2,
},
'two': {
'a': 3,
'b': 4,
},
}
}
schema = skinfer.generate_schema(contain_nested_object)
f = Flatson(schema=schema)
self.assertEquals(['first', 'second.one.a', 'second.one.b', 'second.two.a', 'second.two.b'], f.fieldnames)
self.assertEquals(['hello', 1, 2, 3, 4], f.flatten(contain_nested_object))
def test_convert_object_with_simple_list_with_default_serialization(self):
contain_list = {
'first': 'hello',
'list': [1, 2, 3, 4],
'list2': ['one', 'two'],
}
schema = skinfer.generate_schema(contain_list)
f = Flatson(schema=schema)
self.assertEquals(['first', 'list', 'list2'], f.fieldnames)
self.assertEquals(['hello', '[1,2,3,4]', '["one","two"]'], f.flatten(contain_list))
def test_convert_object_with_nested_simple_list_with_default_serialization(self):
contain_list = {
'first': 'hello',
'second': {
'list1': [1, 2, 3, 4],
'word': 'world',
},
}
schema = skinfer.generate_schema(contain_list)
f = Flatson(schema=schema)
self.assertEquals(['first', 'second.list1', 'second.word'], f.fieldnames)
self.assertEquals(['hello', '[1,2,3,4]', 'world'], f.flatten(contain_list))
def test_convert_object_with_simple_list_with_join_serialization(self):
# given:
contain_list = {
'first': 'hello',
'list': [1, 2, 3, 4],
'list2': ['one', 'two'],
}
schema = skinfer.generate_schema(contain_list)
serialize_options = dict(method='join_values')
schema['properties']['list']['flatson_serialize'] = serialize_options
# when:
f = Flatson(schema=schema)
# then:
self.assertEquals(['first', 'list', 'list2'], f.fieldnames)
self.assertEquals(['hello', '1,2,3,4', '["one","two"]'], f.flatten(contain_list))
# and when:
schema['properties']['list']['flatson_serialize']['separator'] = '+'
f = Flatson(schema=schema)
# then:
self.assertEquals(['hello', '1+2+3+4', '["one","two"]'], f.flatten(contain_list))
def test_lists_with_objects_with_default_serialization(self):
# given:
schema = skinfer.generate_schema(SAMPLE_WITH_LIST_OF_OBJECTS)
f = Flatson(schema=schema)
# when:
result = f.flatten(SAMPLE_WITH_LIST_OF_OBJECTS)
# then:
expected = '[{"key1":"value1","key2":"value2"},{"key1":"value3","key2":"value4"}]'
self.assertEquals(['first', 'list'], f.fieldnames)
self.assertEquals(['hello', expected], result)
def test_array_serialization_with_extract_key_values(self):
# given:
schema = skinfer.generate_schema(SAMPLE_WITH_LIST_OF_OBJECTS)
serialize_options = dict(method='extract_key_values')
# when:
schema['properties']['list']['flatson_serialize'] = serialize_options
f = Flatson(schema=schema)
result = f.flatten(SAMPLE_WITH_LIST_OF_OBJECTS)
# then:
expected = 'key1:value1,key2:value2;key1:value3,key2:value4'
self.assertEquals(['first', 'list'], f.fieldnames)
self.assertEquals(['hello', expected], result)
def test_array_serialization_with_extract_key_values_custom_separators(self):
# given:
schema = skinfer.generate_schema(SAMPLE_WITH_LIST_OF_OBJECTS)
serialize_options = dict(method='extract_key_values',
separators=('|', '-', '='))
# when:
schema['properties']['list']['flatson_serialize'] = serialize_options
f = Flatson(schema=schema)
result = f.flatten(SAMPLE_WITH_LIST_OF_OBJECTS)
# then:
expected = 'key1=value1-key2=value2|key1=value3-key2=value4'
self.assertEquals(['first', 'list'], f.fieldnames)
self.assertEquals(['hello', expected], result)
def test_array_serialization_with_extract_first(self):
# given:
sample = {'first': 'hello', 'list': ['one', 'two']}
schema = skinfer.generate_schema(sample)
serialize_options = dict(method='extract_first')
schema['properties']['list']['flatson_serialize'] = serialize_options
# when:
f = Flatson(schema=schema)
result = f.flatten(sample)
# then:
self.assertEquals(['first', 'list'], f.fieldnames)
self.assertEquals(['hello', 'one'], result)
# and when:
sample2 = {'first': 'hello', 'list': []}
result = f.flatten(sample2)
# then:
self.assertEquals(['first', 'list'], f.fieldnames)
self.assertEquals(['hello', None], result)
def test_register_custom_serialization_method(self):
# given:
sample = {'first': 'hello', 'list': ['one', 'two']}
schema = skinfer.generate_schema(sample)
serialize_options = dict(method='always_one')
schema['properties']['list']['flatson_serialize'] = serialize_options
# when:
f = Flatson(schema=schema)
f.register_serialization_method('always_one', lambda _v, **kw: '1')
result = f.flatten(sample)
# then:
self.assertEquals(['first', 'list'], f.fieldnames)
self.assertEquals(['hello', '1'], result)
def test_disallow_overwriting_official_serialization_methods(self):
# given:
sample = {'first': 'hello', 'list': ['one', 'two']}
schema = skinfer.generate_schema(sample)
serialize_options = dict(method='always_one')
schema['properties']['list']['flatson_serialize'] = serialize_options
# when:
f = Flatson(schema=schema)
with self.assertRaises(ValueError):
f.register_serialization_method('extract_first', lambda _v, **kw: _v[2])
if __name__ == '__main__':
unittest.main()
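# A minimal usage sketch (never executed by the test runner): it only uses the
# Flatson API exercised in the tests above. The sample data, the schema tweak and
# the expected output comments are illustrative, not library documentation.
def _flatson_usage_sketch():
    sample = {'user': {'name': 'Ann', 'age': 30}, 'tags': ['a', 'b']}
    schema = skinfer.generate_schema(sample)
    # serialize the 'tags' list by joining its values with '|' instead of JSON-dumping it
    schema['properties']['tags']['flatson_serialize'] = {
        'method': 'join_values', 'separator': '|'}
    f = Flatson(schema=schema)
    print(f.fieldnames)        # e.g. ['tags', 'user.age', 'user.name']
    print(f.flatten(sample))   # one flat row per object, in fieldnames order
    print(f.flatten_dict(sample))  # same data as a {fieldname: value} dict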
|
|
## @file
#
# This file is the main entry for UPT
#
# Copyright (c) 2011 - 2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
UPT
'''
## import modules
#
import locale
import sys
encoding = locale.getdefaultlocale()[1]
if encoding:
reload(sys)
sys.setdefaultencoding(encoding)
from Core import FileHook
import os.path
from sys import platform
import platform as pf
from optparse import OptionParser
from traceback import format_exc
from platform import python_version
from Logger import StringTable as ST
import Logger.Log as Logger
from Logger.StringTable import MSG_VERSION
from Logger.StringTable import MSG_DESCRIPTION
from Logger.StringTable import MSG_USAGE
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import OPTION_MISSING
from Logger.ToolError import FILE_TYPE_MISMATCH
from Logger.ToolError import OPTION_CONFLICT
from Logger.ToolError import FatalError
from Logger.ToolError import UPT_ALREADY_INSTALLED_ERROR
from Common.MultipleWorkspace import MultipleWorkspace as mws
import MkPkg
import InstallPkg
import RmPkg
import InventoryWs
import ReplacePkg
import TestInstall
from Library.Misc import GetWorkspace
from Library import GlobalData
from Core.IpiDb import IpiDatabase
from BuildVersion import gBUILD_VERSION
## CheckConflictOption
#
# CheckConflictOption
#
def CheckConflictOption(Opt):
if (Opt.PackFileToCreate or Opt.PackFileToInstall or Opt.PackFileToRemove or Opt.PackFileToReplace) \
and Opt.InventoryWs:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_L_OA_EXCLUSIVE)
elif Opt.PackFileToReplace and (Opt.PackFileToCreate or Opt.PackFileToInstall or Opt.PackFileToRemove):
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_U_ICR_EXCLUSIVE)
elif (Opt.PackFileToCreate and Opt.PackFileToInstall and Opt.PackFileToRemove):
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_REQUIRE_I_C_R_OPTION)
elif Opt.PackFileToCreate and Opt.PackFileToInstall:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_I_C_EXCLUSIVE)
elif Opt.PackFileToInstall and Opt.PackFileToRemove:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_I_R_EXCLUSIVE)
elif Opt.PackFileToCreate and Opt.PackFileToRemove:
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_C_R_EXCLUSIVE)
elif Opt.TestDistFiles and (Opt.PackFileToCreate or Opt.PackFileToInstall \
or Opt.PackFileToRemove or Opt.PackFileToReplace):
Logger.Error("UPT", OPTION_CONFLICT, ExtraData=ST.ERR_C_R_EXCLUSIVE)
if Opt.CustomPath and Opt.UseGuidedPkgPath:
Logger.Warn("UPT", ST.WARN_CUSTOMPATH_OVERRIDE_USEGUIDEDPATH)
Opt.UseGuidedPkgPath = False
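# For example (hypothetical file name), combining a create request with the inventory
# listing ('-c MyPkg.dist -l') is rejected here, as is mixing install and remove
# ('-i MyPkg.dist -r MyPkg.dist').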
## SetLogLevel
#
def SetLogLevel(Opt):
if Opt.opt_verbose:
Logger.SetLevel(Logger.VERBOSE)
elif Opt.opt_quiet:
Logger.SetLevel(Logger.QUIET + 1)
elif Opt.debug_level != None:
if Opt.debug_level < 0 or Opt.debug_level > 9:
Logger.Warn("UPT", ST.ERR_DEBUG_LEVEL)
Logger.SetLevel(Logger.INFO)
else:
Logger.SetLevel(Opt.debug_level + 1)
    elif Opt.opt_silent:
Logger.SetLevel(Logger.SILENT)
else:
Logger.SetLevel(Logger.INFO)
## Main
#
# Main
#
def Main():
Logger.Initialize()
Parser = OptionParser(version=(MSG_VERSION + ' Build ' + gBUILD_VERSION), description=MSG_DESCRIPTION,
prog="UPT.exe", usage=MSG_USAGE)
Parser.add_option("-d", "--debug", action="store", type="int", dest="debug_level", help=ST.HLP_PRINT_DEBUG_INFO)
Parser.add_option("-v", "--verbose", action="store_true", dest="opt_verbose",
help=ST.HLP_PRINT_INFORMATIONAL_STATEMENT)
Parser.add_option("-s", "--silent", action="store_true", dest="opt_slient", help=ST.HLP_RETURN_NO_DISPLAY)
Parser.add_option("-q", "--quiet", action="store_true", dest="opt_quiet", help=ST.HLP_RETURN_AND_DISPLAY)
Parser.add_option("-i", "--install", action="store", type="string", dest="Install_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_INSTALL)
Parser.add_option("-c", "--create", action="store", type="string", dest="Create_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_CREATE)
Parser.add_option("-r", "--remove", action="store", type="string", dest="Remove_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_REMOVE)
Parser.add_option("-t", "--template", action="store", type="string", dest="Package_Information_Data_File",
help=ST.HLP_SPECIFY_TEMPLATE_NAME_CREATE)
Parser.add_option("-p", "--dec-filename", action="append", type="string", dest="EDK2_DEC_Filename",
help=ST.HLP_SPECIFY_DEC_NAME_CREATE)
Parser.add_option("-m", "--inf-filename", action="append", type="string", dest="EDK2_INF_Filename",
help=ST.HLP_SPECIFY_INF_NAME_CREATE)
Parser.add_option("-l", "--list", action="store_true", dest="List_Dist_Installed",
help=ST.HLP_LIST_DIST_INSTALLED)
Parser.add_option("-f", "--force", action="store_true", dest="Yes", help=ST.HLP_DISABLE_PROMPT)
Parser.add_option("-n", "--custom-path", action="store_true", dest="CustomPath", help=ST.HLP_CUSTOM_PATH_PROMPT)
Parser.add_option("-x", "--free-lock", action="store_true", dest="SkipLock", help=ST.HLP_SKIP_LOCK_CHECK)
Parser.add_option("-u", "--replace", action="store", type="string", dest="Replace_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_REPLACE)
Parser.add_option("-o", "--original", action="store", type="string", dest="Original_Distribution_Package_File",
help=ST.HLP_SPECIFY_PACKAGE_NAME_TO_BE_REPLACED)
Parser.add_option("--use-guided-paths", action="store_true", dest="Use_Guided_Paths", help=ST.HLP_USE_GUIDED_PATHS)
Parser.add_option("-j", "--test-install", action="append", type="string",
dest="Test_Install_Distribution_Package_Files", help=ST.HLP_TEST_INSTALL)
Opt = Parser.parse_args()[0]
Var2Var = [
("PackageInformationDataFile", Opt.Package_Information_Data_File),
("PackFileToInstall", Opt.Install_Distribution_Package_File),
("PackFileToCreate", Opt.Create_Distribution_Package_File),
("PackFileToRemove", Opt.Remove_Distribution_Package_File),
("PackageFileList", Opt.EDK2_DEC_Filename),
("ModuleFileList", Opt.EDK2_INF_Filename),
("InventoryWs", Opt.List_Dist_Installed),
("PackFileToReplace", Opt.Replace_Distribution_Package_File),
("PackFileToBeReplaced", Opt.Original_Distribution_Package_File),
("UseGuidedPkgPath", Opt.Use_Guided_Paths),
("TestDistFiles", Opt.Test_Install_Distribution_Package_Files)
]
for Var in Var2Var:
setattr(Opt, Var[0], Var[1])
try:
GlobalData.gWORKSPACE, GlobalData.gPACKAGE_PATH = GetWorkspace()
except FatalError, XExcept:
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + format_exc())
return XExcept.args[0]
    # Support a WORKSPACE located at a long path by mapping it to a free drive
    # letter with 'subst'. This only works on Windows systems.
if pf.system() == 'Windows':
Vol = 'B:'
for Index in range(90, 65, -1):
Vol = chr(Index) + ':'
if not os.path.isdir(Vol):
os.system('subst %s "%s"' % (Vol, GlobalData.gWORKSPACE))
break
GlobalData.gWORKSPACE = '%s\\' % Vol
WorkspaceDir = GlobalData.gWORKSPACE
SetLogLevel(Opt)
Mgr = FileHook.RecoverMgr(WorkspaceDir)
FileHook.SetRecoverMgr(Mgr)
GlobalData.gDB = IpiDatabase(os.path.normpath(os.path.join(WorkspaceDir, \
"Conf/DistributionPackageDatabase.db")), WorkspaceDir)
GlobalData.gDB.InitDatabase(Opt.SkipLock)
#
# Make sure the Db will get closed correctly
#
try:
ReturnCode = 0
CheckConflictOption(Opt)
RunModule = None
if Opt.PackFileToCreate:
if Opt.PackageInformationDataFile:
if not os.path.exists(Opt.PackageInformationDataFile):
if not os.path.exists(os.path.join(WorkspaceDir, Opt.PackageInformationDataFile)):
Logger.Error("\nUPT", FILE_NOT_FOUND, ST.ERR_NO_TEMPLATE_FILE % Opt.PackageInformationDataFile)
else:
Opt.PackageInformationDataFile = os.path.join(WorkspaceDir, Opt.PackageInformationDataFile)
else:
Logger.Error("UPT", OPTION_MISSING, ExtraData=ST.ERR_REQUIRE_T_OPTION)
if not Opt.PackFileToCreate.endswith('.dist'):
Logger.Error("CreatePkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToCreate)
RunModule = MkPkg.Main
elif Opt.PackFileToInstall:
if not Opt.PackFileToInstall.endswith('.dist'):
Logger.Error("InstallPkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToInstall)
AbsPath = GetFullPathDist(Opt.PackFileToInstall, WorkspaceDir)
if not AbsPath:
Logger.Error("InstallPkg", FILE_NOT_FOUND, ST.ERR_INSTALL_DIST_NOT_FOUND % Opt.PackFileToInstall)
Opt.PackFileToInstall = AbsPath
setattr(Opt, 'PackageFile', Opt.PackFileToInstall)
RunModule = InstallPkg.Main
elif Opt.PackFileToRemove:
if not Opt.PackFileToRemove.endswith('.dist'):
Logger.Error("RemovePkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToRemove)
head, tail = os.path.split(Opt.PackFileToRemove)
if head or not tail:
Logger.Error("RemovePkg",
FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_FILENAME_ONLY_FOR_REMOVE % Opt.PackFileToRemove)
setattr(Opt, 'DistributionFile', Opt.PackFileToRemove)
RunModule = RmPkg.Main
elif Opt.InventoryWs:
RunModule = InventoryWs.Main
elif Opt.PackFileToBeReplaced and not Opt.PackFileToReplace:
Logger.Error("ReplacePkg", OPTION_MISSING, ExtraData=ST.ERR_REQUIRE_U_OPTION)
elif Opt.PackFileToReplace:
if not Opt.PackFileToReplace.endswith('.dist'):
Logger.Error("ReplacePkg", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToReplace)
if not Opt.PackFileToBeReplaced:
Logger.Error("ReplacePkg", OPTION_MISSING, ExtraData=ST.ERR_REQUIRE_O_OPTION)
if not Opt.PackFileToBeReplaced.endswith('.dist'):
Logger.Error("ReplacePkg",
FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_EXT_ERROR % Opt.PackFileToBeReplaced)
head, tail = os.path.split(Opt.PackFileToBeReplaced)
if head or not tail:
Logger.Error("ReplacePkg",
FILE_TYPE_MISMATCH,
ExtraData=ST.ERR_DIST_FILENAME_ONLY_FOR_REPLACE_ORIG % Opt.PackFileToBeReplaced)
AbsPath = GetFullPathDist(Opt.PackFileToReplace, WorkspaceDir)
if not AbsPath:
Logger.Error("ReplacePkg", FILE_NOT_FOUND, ST.ERR_REPLACE_DIST_NOT_FOUND % Opt.PackFileToReplace)
Opt.PackFileToReplace = AbsPath
RunModule = ReplacePkg.Main
elif Opt.Test_Install_Distribution_Package_Files:
for Dist in Opt.Test_Install_Distribution_Package_Files:
if not Dist.endswith('.dist'):
Logger.Error("TestInstall", FILE_TYPE_MISMATCH, ExtraData=ST.ERR_DIST_EXT_ERROR % Dist)
setattr(Opt, 'DistFiles', Opt.Test_Install_Distribution_Package_Files)
RunModule = TestInstall.Main
else:
Parser.print_usage()
return OPTION_MISSING
ReturnCode = RunModule(Opt)
except FatalError, XExcept:
ReturnCode = XExcept.args[0]
if Logger.GetLevel() <= Logger.DEBUG_9:
Logger.Quiet(ST.MSG_PYTHON_ON % (python_version(), platform) + \
format_exc())
finally:
try:
if ReturnCode != 0 and ReturnCode != UPT_ALREADY_INSTALLED_ERROR:
Logger.Quiet(ST.MSG_RECOVER_START)
GlobalData.gDB.RollBack()
Mgr.rollback()
Logger.Quiet(ST.MSG_RECOVER_DONE)
else:
GlobalData.gDB.Commit()
Mgr.commit()
except StandardError:
Logger.Quiet(ST.MSG_RECOVER_FAIL)
GlobalData.gDB.CloseDb()
if pf.system() == 'Windows':
os.system('subst %s /D' % GlobalData.gWORKSPACE.replace('\\',''))
return ReturnCode
## GetFullPathDist
#
# This function checks whether DistFile exists. If it is not an absolute path, the
# current working directory is tried first, then $(WORKSPACE). The absolute path is
# returned if the file is found, otherwise None.
#
# @param DistFile: The distribution file in either relative path or absolute path
# @param WorkspaceDir: Workspace Directory
# @return AbsPath: The Absolute path of the distribution file if existed, None else
#
def GetFullPathDist(DistFile, WorkspaceDir):
if os.path.isabs(DistFile):
if not (os.path.exists(DistFile) and os.path.isfile(DistFile)):
return None
else:
return DistFile
else:
AbsPath = os.path.normpath(os.path.join(os.getcwd(), DistFile))
if not (os.path.exists(AbsPath) and os.path.isfile(AbsPath)):
AbsPath = os.path.normpath(os.path.join(WorkspaceDir, DistFile))
if not (os.path.exists(AbsPath) and os.path.isfile(AbsPath)):
return None
return AbsPath
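# Illustrative resolution order (hypothetical paths): with WorkspaceDir set to
# 'C:\\MyWorkspace', GetFullPathDist('MyPkg.dist', WorkspaceDir) first checks
# '<cwd>\\MyPkg.dist', then 'C:\\MyWorkspace\\MyPkg.dist', and returns None if
# neither exists as a regular file.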
if __name__ == '__main__':
RETVAL = Main()
#
# 0-127 is a safe return range, and 1 is a standard default error
#
if RETVAL < 0 or RETVAL > 127:
RETVAL = 1
sys.exit(RETVAL)
|
|
import json
import yaml
from conda_build_missing import build, find_all_recipes, sort_dependency_order
import os
from conda_build.config import config as conda_bld_config
import conda_build
import conda.api
from conda.utils import url_path
from contextlib import contextmanager
import logging
import conda_manifest.config
import conda_manifest.core_vn_matrix as vn_matrix
import argparse
import conda.config
from conda.api import get_index
from conda_manifest.env_recipes import load_envs
from conda_manifest.sources import load_sources
stdoutlog = logging.getLogger('conda-manager.stdoutlog')
def build_null(meta):
"""
    A stand-in build function that can be used instead of the real one: it
    replaces the recipe's build script with a simple echo. Quick for testing.
"""
meta.meta.setdefault('build', {})['script'] = 'echo "Hello!"'
build(meta, test=False)
def resolve_index(src_indices, env_sources):
"""
Given the indices for all sources, produce an index with
filtered packages based on the sources specification.
"""
pkg_names_handled = []
index = {}
for sources in env_sources:
pkgs_handled_at_this_level = []
for source in sources:
for tar_name, pkg_info in src_indices[source].items():
name = pkg_info['name']
if name in pkg_names_handled:
continue
pkgs_handled_at_this_level.append(name)
if tar_name in index:
raise ValueError('Conflicting package information for {} '
'from {} and {}.'
''.format(tar_name,
index[tar_name].get('channel'),
pkg_info.get('channel')))
index[tar_name] = pkg_info.copy()
# Put the source into the pkg_info.
index[tar_name]['source'] = source
pkg_names_handled.extend(pkgs_handled_at_this_level)
return index
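# Illustrative example (made-up package names) of the resolution rule above: with
# env_sources = [['local'], ['upstream']] and
#   src_indices = {'local': {'foo-1.0.tar.bz2': {'name': 'foo'}},
#                  'upstream': {'foo-0.9.tar.bz2': {'name': 'foo'},
#                               'bar-2.0.tar.bz2': {'name': 'bar'}}}
# resolve_index() keeps foo-1.0 from 'local' (earlier source levels win per package
# name, so upstream's foo-0.9 is skipped) and bar-2.0 from 'upstream', tagging each
# kept entry with its 'source'.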
@contextmanager
def fixed_get_index(desired_index):
"""
No matter what, get_index should return the desired_index,
and nothing else.
"""
orig_get_index = conda.api.get_index
def new_get_index(*args, **kwargs):
return desired_index
conda.api.get_index = conda_build.build.get_index = new_get_index
yield
conda.api.get_index = conda_build.build.get_index = orig_get_index
@contextmanager
def conda_build_croot_for_source(source_name):
"""
Change the conda build build_root/croot for the lifetime of the context
manager.
"""
orig_build_root = conda_bld_config.croot
conda_bld_config.croot = conda_manifest.config.src_distributions_dir(source_name)
conda_bld_config.bldpkgs_dir = os.path.join(conda_bld_config.croot,
conda.config.subdir)
yield
conda_bld_config.croot = orig_build_root
conda_bld_config.bldpkgs_dir = os.path.join(conda_bld_config.croot,
conda.config.subdir)
def compute_source_indices(env_sources):
"""Generate a dictionary mapping source name to source index."""
src_index = {}
for sources in env_sources:
for source_name in sources:
with conda_build_croot_for_source(source_name):
if os.path.exists(conda_bld_config.bldpkgs_dir):
# Get hold of just the built packages.
src_urls = [url_path(conda_bld_config.croot)]
index = conda.api.get_index(src_urls, prepend=False)
src_index[source_name] = index
else:
# raise ValueError('Source "{}" not found. Consider fetching'
# ' the sources first.'.format(source_name))
src_index[source_name] = {}
return src_index
if __name__ == '__main__':
parser = argparse.ArgumentParser("Pull together the environment recipes "
"directory.")
parser.add_argument("--sources", default='sources.yaml',
help="Location of sources.yaml")
parser.add_argument("--envs", nargs='+', default=['env.specs/*.yaml'],
help="Glob pattern of environment yamls.")
    if conda_manifest.config.DEBUG:
args = parser.parse_args(['--envs', '../env.specs/lts.yaml',
'--sources', '../sources.yaml'])
else:
args = parser.parse_args()
sources = load_sources(args.sources)
envs = load_envs(args.envs)
for env in envs:
env_sources = env['sources']
orig_build_root = conda_bld_config.croot
channels = []
for sources in env_sources:
for source_name in sources:
source_build_directory = conda_manifest.config.src_distributions_dir(source_name)
s = os.path.join(source_build_directory, conda.config.subdir)
if not os.path.exists(s):
os.makedirs(s)
import conda_build.index
conda_build.index.update_index(s)
channels.append(url_path(source_build_directory))
conda.config.rc['channels'] = channels
print 'Channels:', channels
env_recipe_dir = conda_manifest.config.env_recipes_dir(env=env)
metas = list(find_all_recipes([env_recipe_dir]))
metas = sort_dependency_order(metas)
packages_with_version = ['{} {}'.format(meta.name(), meta.version())
for meta in metas]
stdoutlog.debug('Metas will be looked at in the following order:\n{}\n---------'
''.format('\n'.join(packages_with_version)))
src_index = {}
src_index = compute_source_indices(env_sources)
index = resolve_index(src_index, env_sources)
# r = conda.resolve.Resolve(index)
for meta in metas[:]:
remaining_package_names = [m.name() for m in metas]
metas.remove(meta)
stdoutlog.debug('Starting to look at: {} {}\n'.format(meta.name(), meta.version()))
# Build up the index for each package as we go. If we've just built
# a package that this package needs, the index needs to be up to
# date.
src_index = compute_source_indices(env_sources)
index = resolve_index(src_index, env_sources)
with open(os.path.join(meta.path, 'source.json'), 'r') as fh:
source = json.load(fh)
source_name = source['name']
version_matrix = vn_matrix.special_case_version_matrix(meta, index)
print 'BEFORE FILTERING: ', version_matrix
# There is no point keeping the matrix entries which are not needed by the target envs.
env_constraints_excluding_unbuilt = [spec
for spec in env['packages']
if conda.resolve.MatchSpec(spec).name not in remaining_package_names]
version_matrix = list(vn_matrix.filter_cases(version_matrix, index, env_constraints_excluding_unbuilt))
print 'AFTER FILTERING: ', version_matrix
for case in vn_matrix.conda_special_versions(meta, index, version_matrix):
if meta.dist() + '.tar.bz2' not in src_index[source_name]:
stdoutlog.info('Building {} from {}.\n'
''.format(meta.name(), source_name))
with conda_build_croot_for_source(source_name):
# print conda_bld_config.croot
# print conda.config.rc['channels']
print 'BUILDING IN:', conda_bld_config.croot
with conda_manifest.config.pipe_check_call(os.path.join(meta.path,
'build.{}.log'.format(conda.config.subdir))):
# with fixed_get_index(index):
build(meta, channels, test=True)
# TODO: If the test fails, we should remove the build file.
# build_null(meta)
else:
# print src_index, src_index[source_name]
# print conda_bld_config.bldpkgs_dir, conda_bld_config.croot
stdoutlog.info('Not building {} from {}, as it has already been '
'built.\n'.format(meta.name(), source_name))
|
|
# -*- coding: utf-8 -*-
"""Tests for the replace script and ReplaceRobot class."""
#
# (C) Pywikibot team, 2015-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import pywikibot
from pywikibot import fixes
from pywikibot.tools import suppress_warnings
from scripts import replace
from tests import join_data_path
from tests.aspects import unittest
from tests.bot_tests import TWNBotTestCase
from tests.utils import empty_sites
# Load only the custom fixes
fixes.fixes.clear()
fixes._load_file(join_data_path('fixes.py'))
class TestReplacementsMain(TWNBotTestCase):
"""Test various calls of main()."""
SUMMARY_CONFIRMATION = (
'Press Enter to use this automatic message, or enter a '
'description of the\nchanges your bot will make:')
family = 'wikipedia'
code = 'test'
cached = False
def setUp(self):
"""Replace the original bot class with a fake one."""
class FakeReplaceBot(replace.ReplaceRobot):
"""A fake bot class for the minimal support."""
            changed_pages = -42  # deliberately odd sentinel to show this fake class was used
def __init__(inner_self, *args, **kwargs): # noqa: N805
# Unpatch already here, as otherwise super calls will use
# this class' super which is the class itself
replace.ReplaceRobot = self._original_bot
super(FakeReplaceBot, inner_self).__init__(*args, **kwargs)
self.bots.append(inner_self)
def run(inner_self): # noqa: N805
"""Nothing to do here."""
inner_self.changed_pages = -47 # show that run was called
def patched_login(sysop=False):
"""Do nothing."""
pass
def patched_site(*args, **kwargs):
"""Patching a Site instance replacing it's login."""
site = self._original_site(*args, **kwargs)
site.login = patched_login
return site
super(TestReplacementsMain, self).setUp()
self._original_bot = replace.ReplaceRobot
self._original_input = replace.pywikibot.input
self._original_site = replace.pywikibot.Site
self.bots = []
self.inputs = []
replace.ReplaceRobot = FakeReplaceBot
replace.pywikibot.input = self._fake_input
replace.pywikibot.Site = patched_site
def tearDown(self):
"""Bring back the old bot class."""
replace.ReplaceRobot = self._original_bot
replace.pywikibot.input = self._original_input
replace.pywikibot.Site = self._original_site
with empty_sites():
super(TestReplacementsMain, self).tearDown()
def _fake_input(self, message):
"""Cache the message and return static text "TESTRUN"."""
self.inputs.append(message)
return 'TESTRUN'
def _run(self, *args):
"""Run the L{replace.main} with the given args and summary and page."""
# -page to not have an empty generator
# -lang and -family as it will use Site() otherwise
return replace.main(*(args + ('-lang:test', '-family:wikipedia',
'-page:TEST')))
def test_invalid_replacements(self):
"""Test invalid command line replacement configurations."""
# old and new need to be together
self.assertFalse(self._run('foo', '-pairsfile:/dev/null', 'bar'))
# only old provided
with empty_sites():
self.assertFalse(self._run('foo'))
# In the end no bots should've been created
self.assertFalse(self.bots)
def _test_replacement(self, replacement, clazz=replace.Replacement,
offset=0):
"""Test a replacement from the command line."""
self.assertIsInstance(replacement, clazz)
self.assertEqual(replacement.old, str(offset * 2 + 1))
if not callable(replacement.new):
self.assertEqual(replacement.new, str(offset * 2 + 2))
def _test_fix_replacement(self, replacement,
length=1, offset=0, msg=False):
"""Test a replacement from a fix."""
assert length > offset
self._test_replacement(replacement, replace.ReplacementListEntry,
offset)
if msg:
self.assertEqual(replacement.edit_summary,
'M{0}'.format(offset + 1))
else:
self.assertIs(replacement.edit_summary,
replacement.fix_set.edit_summary)
self.assertIs(replacement.fix_set, replacement.container)
self.assertIsInstance(replacement.fix_set, replace.ReplacementList)
self.assertIsInstance(replacement.fix_set, list)
self.assertIn(replacement, replacement.fix_set)
self.assertIs(replacement, replacement.fix_set[offset])
self.assertLength(replacement.fix_set, length)
def _get_bot(self, only_confirmation, *args):
"""Run with arguments, assert and return one bot."""
self.assertIsNone(self._run(*args))
self.assertLength(self.bots, 1)
bot = self.bots[0]
if only_confirmation is not None:
self.assertIn(self.SUMMARY_CONFIRMATION, self.inputs)
if only_confirmation is True:
self.assertLength(self.inputs, 1)
else:
self.assertNotIn(self.SUMMARY_CONFIRMATION, self.inputs)
self.assertEqual(bot.site, self.site)
self.assertEqual(bot.changed_pages, -47)
return bot
def _apply(self, bot, expected, missing=None, title='Test page'):
"""Test applying a test change."""
applied = set()
if missing is True:
required_applied = set()
else:
required_applied = set(bot.replacements)
if missing:
required_applied -= set(missing)
# shouldn't be edited anyway
page = pywikibot.Page(self.site, title)
self.assertEqual(expected,
bot.apply_replacements('Hello 1', applied, page))
self.assertEqual(applied, required_applied)
with suppress_warnings('scripts.replace.ReplaceRobot.doReplacements'):
self.assertEqual(expected, bot.doReplacements('Hello 1', page))
def test_only_cmd(self):
"""Test command line replacements only."""
bot = self._get_bot(True, '1', '2')
self.assertLength(bot.replacements, 1)
self._test_replacement(bot.replacements[0])
def test_cmd_automatic(self):
"""Test command line replacements with automatic summary."""
bot = self._get_bot(None, '1', '2', '-automaticsummary')
self.assertLength(bot.replacements, 1)
self._test_replacement(bot.replacements[0])
self.assertEqual(self.inputs, [])
def test_only_fix_global_message(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:has-msg')
self.assertLength(bot.replacements, 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_global_message_tw(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:has-msg-tw')
self.assertLength(bot.replacements, 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_no_message(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:no-msg')
self.assertLength(bot.replacements, 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_all_replacement_summary(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:all-repl-msg')
self.assertLength(bot.replacements, 1)
self._test_fix_replacement(bot.replacements[0], msg=True)
def test_only_fix_partial_replacement_summary(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:partial-repl-msg')
for offset, replacement in enumerate(bot.replacements):
self._test_fix_replacement(replacement, 2, offset, offset == 0)
self.assertLength(bot.replacements, 2)
def test_only_fix_multiple(self):
"""Test fixes replacements only."""
bot = self._get_bot(None, '-fix:has-msg-multiple')
for offset, replacement in enumerate(bot.replacements):
self._test_fix_replacement(replacement, 3, offset)
self.assertLength(bot.replacements, 3)
def test_cmd_and_fix(self):
"""Test command line and fix replacements together."""
bot = self._get_bot(True, '1', '2', '-fix:has-msg')
self.assertLength(bot.replacements, 2)
self._test_replacement(bot.replacements[0])
self._test_fix_replacement(bot.replacements[1])
def test_except_title(self):
"""Test excepting and requiring a title specific to fix."""
bot = self._get_bot(True, '-fix:no-msg-title-exceptions')
self.assertLength(bot.replacements, 1)
self._test_fix_replacement(bot.replacements[0])
self.assertIn('title', bot.replacements[0].exceptions)
self.assertIn('require-title', bot.replacements[0].exceptions)
self._apply(bot, 'Hello 1', missing=True, title='Neither')
self._apply(bot, 'Hello 2', title='Allowed')
self._apply(bot, 'Hello 1', missing=True, title='Allowed Declined')
def test_fix_callable(self):
"""Test fix replacements using a callable."""
bot = self._get_bot(True, '-fix:no-msg-callable')
self.assertLength(bot.replacements, 1)
self._test_fix_replacement(bot.replacements[0])
self.assertTrue(callable(bot.replacements[0].new))
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
|
|
"""
Collects information from TekkenGameState over time in hopes of synthesizing it and presenting it in a more useful way.
"""
from MoveInfoEnums import AttackType
from MoveInfoEnums import ThrowTechs
from MoveInfoEnums import ComplexMoveStates
from TekkenGameState import TekkenGameState
import time
from enum import Enum
class TekkenEncyclopedia:
def __init__(self, isPlayerOne = False, print_extended_frame_data = False):
self.FrameData = {}
self.GameEvents = []
self.current_game_event = None
self.isPlayerOne = isPlayerOne
self.print_extended_frame_data = print_extended_frame_data
self.active_frame_wait = 1
self.was_fight_being_reacquired = True
self.is_match_recorded = False
self.stat_filename = "TekkenData/matches.txt"
if self.isPlayerOne:
self.LoadStats()
self.current_punish_window = None
self.PunishWindows = []
self.current_frame_data_entry = None
self.previous_frame_data_entry = None
def LoadStats(self):
self.stat_dict = {}
self.stat_dict['char_stats'] = {}
self.stat_dict['matchup_stats'] = {}
self.stat_dict['opponent_stats'] = {}
try:
with open(self.stat_filename, 'r', encoding='utf-8') as fr:
lines = fr.readlines()
for line in lines:
if '|' in line:
args = line.split('|')
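                        # Each stored line has the pipe-delimited layout written by
                        # DetermineGameStats() below, e.g. (hypothetical match):
                        #   WIN | You | Kazuya | vs | SomeOpponent | Jin | 3-1 | 2020_01_01_12.00
                        # so args[0] is the result, args[2] the player's character,
                        # args[4] the opponent's name and args[5] the opponent's character.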
result = args[0].strip()
player_char = args[2].strip()
opponent_name = args[4].strip()
opponent_char = args[5].strip()
self.AddStat(result, player_char, opponent_name, opponent_char)
except FileNotFoundError:
pass
def AddStat(self, result, player_char, opponent_name, opponent_char):
if not opponent_char in self.stat_dict['char_stats']:
self.stat_dict['char_stats'][opponent_char] = [0, 0, 0]
if not opponent_name in self.stat_dict['opponent_stats']:
self.stat_dict['opponent_stats'][opponent_name] = [0, 0, 0]
matchup_string = "{} vs {}".format(player_char, opponent_char)
if not matchup_string in self.stat_dict['matchup_stats']:
self.stat_dict['matchup_stats'][matchup_string] = [0, 0, 0]
if 'WIN' in result:
index = 0
elif 'LOSS' in result:
index = 1
else:
index = 2
self.stat_dict['char_stats'][opponent_char][index] += 1
self.stat_dict['opponent_stats'][opponent_name][index] += 1
self.stat_dict['matchup_stats'][matchup_string][index] += 1
    def RecordFromStat(self, category, lookup):
        try:
            stats = self.stat_dict[category][lookup]
            wins = stats[0]
            losses = stats[1]
            draws = stats[2]
except:
wins = 0
losses = 0
draws = 0
if draws <= 0:
return "{} - {}".format(wins, losses)
else:
return "{} - {} - {}".format(wins, losses, draws)
def GetPlayerString(self, reverse = False):
if (self.isPlayerOne and not reverse) or (not self.isPlayerOne and reverse):
return "p1: "
else:
return "p2: "
def GetFrameAdvantage(self, moveId, isOnBlock = True):
if moveId in self.FrameData:
if isOnBlock:
return self.FrameData[moveId].onBlock
else:
return self.FrameData[moveId].onNormalHit
else:
return None
#Set the dummy to jump and hold up and this prints the frame difference.
def CheckJumpFrameDataFallback(self, gameState):
if not self.isPlayerOne:
if gameState.IsFulfillJumpFallbackConditions():
print("p1 jump frame diff: " + str(gameState.GetBotMoveTimer() - gameState.GetOppMoveTimer()))
def Update(self, gameState: TekkenGameState):
if self.isPlayerOne:
gameState.FlipMirror()
#self.CheckJumpFrameDataFallback(gameState)
self.DetermineFrameData(gameState)
self.DetermineGameStats(gameState)
self.DetermineCoachingTips(gameState)
if self.isPlayerOne:
gameState.FlipMirror()
def DetermineCoachingTips(self, gameState: TekkenGameState):
if self.previous_frame_data_entry != self.current_frame_data_entry:
self.previous_frame_data_entry = self.current_frame_data_entry
if self.current_punish_window != None:
self.ClosePunishWindow(PunishWindow.Result.NO_WINDOW, do_close_frame_data_entries=False)
# if int(self.current_frame_data_entry.currentFrameAdvantage) <= 999999:
self.current_punish_window = PunishWindow(self.current_frame_data_entry.prefix,
self.current_frame_data_entry.move_id,
self.current_frame_data_entry.input,
int(self.current_frame_data_entry.hitRecovery),
int(self.current_frame_data_entry.blockRecovery),
int(self.current_frame_data_entry.activeFrames))
self.PunishWindows.append(self.current_punish_window)
self.punish_window_counter = 0
if self.current_punish_window != None:
self.punish_window_counter += 1
#if self.punish_window_counter > self.current_punish_window.size:
was_block_punish = gameState.DidOppStartGettingPunishedXFramesAgo(1) or gameState.DidOppStartGettingHitXFramesAgo(1)
if was_block_punish:
leeway = (gameState.OppFramesUntilRecoveryXFramesAgo(2) - 1)
LAUNCH_PUNISHIBLE = 15
BAD_PUNISH_THRESHOLD = 13
#if leeway == 0:
#self.ClosePunishWindow(PunishWindow.Result.PERFECT_PUNISH)
#else:
fa = (-1 * self.current_punish_window.get_frame_advantage())
startup = fa - leeway
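                # Hypothetical example of the bookkeeping above: a move that is -17 on block
                # gives fa = 17; punishing it with 4 frames of leeway left implies a 13-frame
                # punish, so a launch-punishable window (fa >= 15) was answered with a faster,
                # smaller punish and NO_LAUNCH_ON_LAUNCHABLE is reported below.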
if fa >= LAUNCH_PUNISHIBLE and startup <= BAD_PUNISH_THRESHOLD:
self.ClosePunishWindow(PunishWindow.Result.NO_LAUNCH_ON_LAUNCHABLE)
elif fa >= LAUNCH_PUNISHIBLE:
self.ClosePunishWindow(PunishWindow.Result.LAUNCH_ON_LAUNCHABLE)
else:
self.ClosePunishWindow(PunishWindow.Result.JAB_ON_NOT_LAUNCHABLE)
elif gameState.HasOppReturnedToNeutralFromMoveId(self.current_punish_window.move_id) and self.punish_window_counter >= self.current_punish_window.hit_recovery:
if self.current_punish_window.get_frame_advantage() <= -10:
self.ClosePunishWindow(PunishWindow.Result.NO_PUNISH)
else:
self.ClosePunishWindow(PunishWindow.Result.NO_WINDOW)
if self.current_punish_window != None:
self.current_punish_window.adjust_window(gameState.GetOppFramesTillNextMove(), gameState.GetBotFramesTillNextMove())
#perfect_punish = False
#if was_block_punish:
#perfect_punish = gameState.WasBotMoveOnLastFrameXFramesAgo(2)
def ClosePunishWindow(self, result, do_close_frame_data_entries = True):
self.current_punish_window.close_window(result)
self.current_punish_window = None
if do_close_frame_data_entries:
self.previous_frame_data_entry = None
self.current_frame_data_entry = None
def DetermineGameStats(self, gameState: TekkenGameState):
frames_ago = 4
if self.current_game_event == None:
if gameState.DidOppComboCounterJustStartXFramesAgo(frames_ago):
gameState.BackToTheFuture(frames_ago)
combo_counter_damage = gameState.GetOppComboDamageXFramesAgo(1)
was_unblockable = gameState.IsOppAttackUnblockable()
was_antiair = gameState.IsOppAttackAntiair()
was_block_punish = gameState.DidBotStartGettingPunishedXFramesAgo(1)
perfect_punish = False
if was_block_punish:
perfect_punish = gameState.BotFramesUntilRecoveryXFramesAgo(2) == 1
was_counter_hit = gameState.IsBotGettingCounterHit()
was_ground_hit = gameState.IsBotGettingHitOnGround()
was_whiff_punish = gameState.GetBotStartupXFramesAgo(2) > 0
was_low_hit = gameState.IsOppAttackLow()
was_mid_hit_on_crouching = gameState.IsOppAttackMid() and gameState.IsBotCrouching()
was_throw = gameState.IsBotBeingThrown()
was_damaged_during_attack = gameState.DidOppTakeDamageDuringStartup()
gameState.ReturnToPresent()
if was_unblockable:
hit = GameStatEventEntry.EntryType.UNBLOCKABLE
elif was_antiair:
hit = GameStatEventEntry.EntryType.ANTIAIR
elif was_throw:
hit = GameStatEventEntry.EntryType.THROW
elif was_damaged_during_attack:
hit = GameStatEventEntry.EntryType.POWER_CRUSHED
elif was_block_punish:
hit = GameStatEventEntry.EntryType.PUNISH
elif was_counter_hit:
hit = GameStatEventEntry.EntryType.COUNTER
elif was_ground_hit:
hit = GameStatEventEntry.EntryType.GROUND
elif was_whiff_punish:
hit = GameStatEventEntry.EntryType.WHIFF_PUNISH
elif was_low_hit:
hit = GameStatEventEntry.EntryType.LOW
elif was_mid_hit_on_crouching:
hit = GameStatEventEntry.EntryType.MID
else:
hit = GameStatEventEntry.EntryType.NO_BLOCK
self.current_game_event = GameStatEventEntry(gameState.stateLog[-1].timer_frames_remaining, self.GetPlayerString(True), hit, combo_counter_damage)
#print("event open")
else:
bot_damage_taken = gameState.DidBotJustTakeDamage(frames_ago + 1)
if bot_damage_taken > 0:
#print('armored')
game_event = GameStatEventEntry(gameState.stateLog[-1].timer_frames_remaining, self.GetPlayerString(True), GameStatEventEntry.EntryType.ARMORED, 0) #this is probably gonna break for Yoshimitsu's self damage moves
game_event.close_entry(gameState.stateLog[-1].timer_frames_remaining, 1, bot_damage_taken, 0, len(self.GameEvents))
self.GameEvents.append(game_event)
else:
if gameState.DidOppComboCounterJustEndXFramesAgo(frames_ago) or gameState.WasFightReset():
hits = gameState.GetOppComboHitsXFramesAgo(frames_ago + 1)
damage = gameState.GetOppComboDamageXFramesAgo(frames_ago + 1)
juggle = gameState.GetOppJuggleDamageXFramesAgo(frames_ago + 1)
self.current_game_event.close_entry(gameState.stateLog[-1].timer_frames_remaining, hits, damage, juggle, len(self.GameEvents))
self.GameEvents.append(self.current_game_event)
self.current_game_event = None
#print("event closed")
if gameState.WasFightReset():
#print("p1: NOW:0")
#print("p2: NOW:0")
if self.isPlayerOne:
if gameState.gameReader.flagToReacquireNames == False and self.was_fight_being_reacquired:
self.is_match_recorded = False
for entry in self.get_matchup_record(gameState):
print(entry)
round_number = gameState.GetRoundNumber()
print("!ROUND | {} | HIT".format(round_number))
if (gameState.stateLog[-1].bot.wins == 3 or gameState.stateLog[-1].opp.wins == 3) and not self.is_match_recorded:
self.is_match_recorded = True
player_name = "You"
p1_char_name = gameState.stateLog[-1].opp.character_name
p1_wins = gameState.stateLog[-1].opp.wins
opponent_name = gameState.stateLog[-1].opponent_name
p2_char_name = gameState.stateLog[-1].bot.character_name
p2_wins = gameState.stateLog[-1].bot.wins
if gameState.stateLog[-1].is_player_player_one:
player_char, player_wins = p1_char_name, p1_wins
opponent_char, opponent_wins = p2_char_name, p2_wins
else:
player_char, player_wins = p2_char_name, p2_wins
opponent_char, opponent_wins = p1_char_name, p1_wins
if player_wins == opponent_wins:
result = 'DRAW'
elif player_wins > opponent_wins:
result = 'WIN'
else:
result = "LOSS"
match_result = '{} | {} | {} | vs | {} | {} | {}-{} | {}'.format(result, player_name, player_char, opponent_name, opponent_char, player_wins, opponent_wins, time.strftime('%Y_%m_%d_%H.%M'))
print("{}".format(match_result))
self.AddStat(result, player_char, opponent_name, opponent_char)
with open(self.stat_filename, "a", encoding='utf-8') as fa:
fa.write(match_result + '\n')
if (gameState.GetTimer(frames_ago) < 3600 and len(self.GameEvents) > 0) or True:
summary = RoundSummary(self.GameEvents, gameState.GetOppRoundSummary(frames_ago))
self.GameEvents = []
self.was_fight_being_reacquired = gameState.gameReader.flagToReacquireNames
def get_matchup_record(self, gameState):
if gameState.stateLog[-1].is_player_player_one:
opponent_char = gameState.stateLog[-1].bot.character_name
player_char = gameState.stateLog[-1].opp.character_name
else:
opponent_char = gameState.stateLog[-1].opp.character_name
player_char = gameState.stateLog[-1].bot.character_name
opponent_name = gameState.stateLog[-1].opponent_name
return [
("!RECORD | vs {}: {}".format(opponent_char, self.RecordFromStat('char_stats', opponent_char))),
("!RECORD | vs {}: {}".format(opponent_name, self.RecordFromStat('opponent_stats', opponent_name))),
("!RECORD | {} vs {}: {}".format(player_char, opponent_char, self.RecordFromStat("matchup_stats", "{} vs {}".format(player_char, opponent_char))))
]
def DetermineFrameData(self, gameState):
if (gameState.IsBotBlocking() or gameState.IsBotGettingHit() or gameState.IsBotBeingThrown() or gameState.IsBotBeingKnockedDown() or gameState.IsBotBeingWallSplatted()): #or gameState.IsBotUsingOppMovelist()): #or gameState.IsBotStartedBeingJuggled() or gameState.IsBotJustGrounded()):
# print(gameState.stateLog[-1].bot.move_id)
#print(gameState.stateLog[-1].bot.move_timer)
#print(gameState.stateLog[-1].bot.recovery)
#print(gameState.DidBotIdChangeXMovesAgo(self.active_frame_wait))
if gameState.DidBotIdChangeXMovesAgo(self.active_frame_wait) or gameState.DidBotTimerInterruptXMovesAgo(
self.active_frame_wait): # or gameState.DidOppIdChangeXMovesAgo(self.active_frame_wait):
is_recovering_before_long_active_frame_move_completes = (gameState.GetBotRecovery() - gameState.GetBotMoveTimer() == 0)
gameState.BackToTheFuture(self.active_frame_wait)
#print(gameState.GetOppActiveFrames())
if (not self.active_frame_wait >= gameState.GetOppActiveFrames() + 1) and not is_recovering_before_long_active_frame_move_completes:
self.active_frame_wait += 1
else:
gameState.ReturnToPresent()
currentActiveFrame = gameState.GetLastActiveFrameHitWasOn(self.active_frame_wait)
gameState.BackToTheFuture(self.active_frame_wait)
opp_id = gameState.GetOppMoveId()
if opp_id in self.FrameData:
frameDataEntry = self.FrameData[opp_id]
else:
frameDataEntry = FrameDataEntry(self.print_extended_frame_data)
self.FrameData[opp_id] = frameDataEntry
frameDataEntry.currentActiveFrame = currentActiveFrame
frameDataEntry.currentFrameAdvantage = '??'
frameDataEntry.move_id = opp_id
# frameDataEntry.damage =
frameDataEntry.damage = gameState.GetOppDamage()
frameDataEntry.startup = gameState.GetOppStartup()
if frameDataEntry.damage == 0 and frameDataEntry.startup == 0:
frameDataEntry.startup, frameDataEntry.damage = gameState.GetOppLatestNonZeroStartupAndDamage()
frameDataEntry.activeFrames = gameState.GetOppActiveFrames()
frameDataEntry.hitType = AttackType(gameState.GetOppAttackType()).name
if gameState.IsOppAttackThrow():
frameDataEntry.hitType += "_THROW"
frameDataEntry.recovery = gameState.GetOppRecovery()
#frameDataEntry.input = frameDataEntry.InputTupleToInputString(gameState.GetOppLastMoveInput())
frameDataEntry.input = gameState.GetCurrentOppMoveString()
frameDataEntry.technical_state_reports = gameState.GetOppTechnicalStates(frameDataEntry.startup - 1)
frameDataEntry.tracking = gameState.GetOppTrackingType(frameDataEntry.startup)
#print(gameState.GetRangeOfMove())
gameState.ReturnToPresent()
#frameDataEntry.throwTech = gameState.GetBotThrowTech(frameDataEntry.activeFrames + frameDataEntry.startup)
frameDataEntry.throwTech = gameState.GetBotThrowTech(1)
time_till_recovery_opp = gameState.GetOppFramesTillNextMove()
time_till_recovery_bot = gameState.GetBotFramesTillNextMove()
new_frame_advantage_calc = time_till_recovery_bot - time_till_recovery_opp
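                    # Hypothetical numbers for the calculation above: if the blocking bot
                    # still needs 15 frames to recover and the attacker needs 25, the
                    # attacker is 15 - 25 = -10, i.e. "-10 on block".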
frameDataEntry.currentFrameAdvantage = frameDataEntry.WithPlusIfNeeded(new_frame_advantage_calc)
if gameState.IsBotBlocking():
frameDataEntry.onBlock = new_frame_advantage_calc
else:
if gameState.IsBotGettingCounterHit():
frameDataEntry.onCounterHit = new_frame_advantage_calc
else:
frameDataEntry.onNormalHit = new_frame_advantage_calc
frameDataEntry.hitRecovery = time_till_recovery_opp
frameDataEntry.blockRecovery = time_till_recovery_bot
frameDataEntry.move_str = gameState.GetCurrentOppMoveName()
frameDataEntry.prefix = self.GetPlayerString()
print(str(frameDataEntry))
self.current_frame_data_entry = frameDataEntry
gameState.BackToTheFuture(self.active_frame_wait)
self.active_frame_wait = 1
gameState.ReturnToPresent()
class FrameDataEntry:
def __init__(self, print_extended = False):
self.print_extended = print_extended
self.prefix = '??'
self.move_id = '??'
self.move_str = '??'
self.startup = '??'
self.calculated_startup = -1
self.hitType = '??'
self.onBlock = '??'
self.onCounterHit = '??'
self.onNormalHit = '??'
self.recovery = '??'
self.damage = '??'
self.blockFrames = '??'
self.activeFrames = '??'
self.currentFrameAdvantage = '??'
self.currentActiveFrame = '??'
self.input = '??'
self.technical_state_reports = []
self.blockRecovery = '??'
self.hitRecovery = '??'
self.throwTech = None
self.tracking = ComplexMoveStates.F_MINUS
def WithPlusIfNeeded(self, value):
try:
if value >= 0:
return '+' + str(value)
else:
return str(value)
        except TypeError:
            # value may still be the '??' placeholder string; fall back to plain str()
            return str(value)
def InputTupleToInputString(self, inputTuple):
s = ""
for input in inputTuple:
s += (input[0].name + input[1].name.replace('x', '+')).replace('N', '')
if input[2]:
s += "+R"
return s
def __repr__(self):
notes = ''
        if self.throwTech is not None and self.throwTech != ThrowTechs.NONE:
notes += self.throwTech.name + " "
self.calculated_startup = self.startup
for report in self.technical_state_reports:
#if not self.print_extended:
if 'TC' in report.name and report.is_present():
notes += str(report)
elif 'TJ' in report.name and report.is_present():
notes += str(report)
elif 'PC' in report.name and report.is_present():
notes += str(report)
elif 'SKIP' in report.name and report.is_present():
#print(report)
self.calculated_startup -= report.total_present()
elif 'FROZ' in report.name and report.is_present():
#print(report)
self.calculated_startup -= report.total_present()
elif self.print_extended:
if report.is_present():
notes += str(report)
nerd_string = ""
if self.print_extended:
pass
#notes += ' stun {}'.format(self.blockRecovery)
#notes += ' a_recovery {}'.format(self.hitRecovery)
#notes += "Total:" + str(self.recovery) + "f "
if self.calculated_startup != self.startup:
self.calculated_startup = str(self.calculated_startup) + "?"
non_nerd_string = "{:^5}|{:^4}|{:^4}|{:^7}|{:^4}|{:^4}|{:^4}|{:^5}|{:^3}|{:^2}|{:^3}|{:^3}|{:^3}|".format(
str(self.input),
str(self.move_id),
self.move_str,
str(self.hitType)[:7],
str(self.calculated_startup),
self.WithPlusIfNeeded(self.onBlock),
self.WithPlusIfNeeded(self.onNormalHit),
self.WithPlusIfNeeded(self.onCounterHit),
(str(self.currentActiveFrame) + "/" + str(self.activeFrames)),
self.tracking.name.replace('_MINUS', '-').replace("_PLUS", '+').replace(ComplexMoveStates.UNKN.name, '?'),
self.recovery,
self.hitRecovery,
self.blockRecovery
)
notes_string = "{}".format(notes)
now_string = " NOW:{}".format(str(self.currentFrameAdvantage))
return self.prefix + non_nerd_string + notes_string + now_string
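# Illustrative sketch (not part of the original bot): how the sign formatting and
# the frame-advantage arithmetic fit together. The numbers below are hypothetical;
# in DetermineFrameData the advantage is time_till_recovery_bot - time_till_recovery_opp.
def _example_frame_advantage_formatting():
    entry = FrameDataEntry()
    # If the blocking bot is actionable again in 10 frames and the attacker in 15,
    # the attacker's move comes out to 10 - 15 = -5 on block.
    on_block = 10 - 15
    # WithPlusIfNeeded prepends '+' only to non-negative values: '-5' vs '+5'.
    return entry.WithPlusIfNeeded(on_block), entry.WithPlusIfNeeded(5)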
class GameStatEventEntry:
class EntryType(Enum):
COUNTER = 1
PUNISH = 2
WHIFF_PUNISH = 3
LOW = 4
MID = 5
THROW = 6
GROUND = 7
NO_BLOCK = 8
ARMORED = 10
UNBLOCKABLE = 12
ANTIAIR = 14
POWER_CRUSHED = 15
#Not implemented
LOW_PARRY = 9
OUT_OF_THE_AIR = 13
class PunishType(Enum):
NONE = 0
PERFECT = 1
JAB = 2
JAB_ON_LAUNCH_PUNISHIBLE = 3
def __init__(self, time_in_frames, player_string, hit_type : EntryType, combo_counter_damage):
self.start_time = time_in_frames
self.player_string = player_string
self.hit_type = hit_type
self.damage_already_on_combo_counter = combo_counter_damage
def close_entry(self, time_in_frames, total_hits, total_damage, juggle_damage, times_hit):
self.end_time = time_in_frames
self.total_hits = total_hits
self.total_damage = max(0, total_damage - self.damage_already_on_combo_counter)
self.juggle_damage = juggle_damage
print('{} {} | {} | {} | {} | {} | HIT'.format(self.player_string, self.hit_type.name, self.total_damage, self.total_hits, self.start_time, self.end_time))
class RoundSummary:
def __init__(self, events, round_variables):
self.events = events
self.collated_events = self.collate_events(events)
total_damage = 0
sources, types = self.collated_events
#print('{} combos for {} damage'.format(types[0][0], types[0][1]))
#print('{} pokes for {} damage'.format(types[1][0], types[1][1]))
for event, hits, damage in sources:
if damage > 0:
#print('{} {} for {} damage'.format(hits, event.name, damage))
total_damage += damage
#print('total damage dealt {} ({})'.format(round_variables[1], total_damage))
def collate_events(self, events):
hits_into_juggles = 0
hits_into_pokes = 0
damage_from_juggles = 0
damage_from_pokes = 0
sources = []
for entry in GameStatEventEntry.EntryType:
occurances = 0
damage = 0
for event in events:
if entry == event.hit_type:
occurances += 1
damage += event.total_damage
if event.juggle_damage > 0:
damage_from_juggles += event.total_damage
hits_into_juggles += 1
else:
damage_from_pokes += event.total_damage
hits_into_pokes += 1
sources.append((entry, occurances, damage))
sources.sort(key=lambda x: x[2], reverse=True)
types = [(hits_into_juggles, damage_from_juggles), (hits_into_pokes, damage_from_pokes)]
return sources, types
def __repr__(self):
pass
class PunishWindow:
class Result(Enum):
NO_WINDOW = 0
NO_PUNISH = 1
PERFECT_PUNISH = 2
NO_LAUNCH_ON_LAUNCHABLE = 3
LAUNCH_ON_LAUNCHABLE = 4
JAB_ON_NOT_LAUNCHABLE = 5
NOT_YET_CLOSED = 99
def __init__(self, prefix, move_id, string_name, hit_recovery, block_recovery, active_frames ):
self.prefix = prefix
self.move_id = move_id
self.name = string_name
self.hit_recovery = hit_recovery
self.block_recovery = block_recovery
self.active_frames = active_frames
self.is_window_locked = False
self.original_diff = self.get_frame_advantage()
self.upcoming_lock = False
self.frames_locked = 0
self.result = PunishWindow.Result.NOT_YET_CLOSED
def get_frame_advantage(self):
if not self.is_window_locked:
return self.block_recovery - self.hit_recovery
else:
return 0 - self.hit_recovery - self.frames_locked
def adjust_window(self, hit_recovery, block_recovery):
#if block_recovery > self.block_recovery:
self.hit_recovery = hit_recovery
if self.upcoming_lock:
self.frames_locked += 1
self.is_window_locked = True
if not self.is_window_locked:
self.block_recovery = block_recovery
if block_recovery == 0:
self.upcoming_lock = True
if self.get_frame_advantage() != self.original_diff:
print('{} NOW:{}'.format(self.prefix, FrameDataEntry.WithPlusIfNeeded(None, self.get_frame_advantage())))
self.original_diff = self.get_frame_advantage()
def close_window(self, result : Result):
self.result = result
if result != PunishWindow.Result.NO_WINDOW:
print("Closing punish window, result: {}".format(self.result.name))
|
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
""" CSW request and response processor """
import warnings
import io
import random
from urllib.parse import urlencode
from urllib.request import urlopen
from owslib.etree import etree
from owslib import fes
from owslib import util
from owslib import ows
from owslib.iso import MD_Metadata
from owslib.fgdc import Metadata
from owslib.dif import DIF
from owslib.namespaces import Namespaces
from owslib.util import cleanup_namespaces, bind_url
# default variables
outputformat = 'application/xml'
def get_namespaces():
n = Namespaces()
return n.get_namespaces()
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd'
schema_location = '%s %s' % (namespaces['csw'], schema)
class CatalogueServiceWeb:
""" csw request class """
def __init__(self, url, lang='en-US', version='2.0.2', timeout=10, skip_caps=False):
"""
Construct and process a GetCapabilities request
Parameters
----------
- url: the URL of the CSW
- lang: the language (default is 'en-US')
- version: version (default is '2.0.2')
- timeout: timeout in seconds
- skip_caps: whether to skip GetCapabilities processing on init (default is False)
"""
self.url = url
self.lang = lang
self.version = version
self.timeout = timeout
self.service = 'CSW'
self.exceptionreport = None
self.owscommon = ows.OwsCommon('1.0.0')
if not skip_caps: # process GetCapabilities
# construct request
data = {'service': self.service, 'version': self.version, 'request': 'GetCapabilities'}
self.request = '%s%s' % (bind_url(self.url), urlencode(data))
self._invoke()
if self.exceptionreport is None:
# ServiceIdentification
val = self._exml.find(util.nspath_eval('ows:ServiceIdentification', namespaces))
self.identification=ows.ServiceIdentification(val,self.owscommon.namespace)
# ServiceProvider
val = self._exml.find(util.nspath_eval('ows:ServiceProvider', namespaces))
self.provider=ows.ServiceProvider(val,self.owscommon.namespace)
# ServiceOperations metadata
self.operations=[]
for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):
self.operations.append(ows.OperationsMetadata(elem, self.owscommon.namespace))
# FilterCapabilities
val = self._exml.find(util.nspath_eval('ogc:Filter_Capabilities', namespaces))
self.filters=fes.FilterCapabilities(val)
def describerecord(self, typename='csw:Record', format=outputformat):
"""
Construct and process DescribeRecord request
Parameters
----------
- typename: the typename to describe (default is 'csw:Record')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
node0 = self._setrootelement('csw:DescribeRecord')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set('outputFormat', format)
node0.set('schemaLanguage', namespaces['xs2'])
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:TypeName', namespaces)).text = typename
self.request = node0
self._invoke()
# parse result
# TODO: process the XML Schema (you're on your own for now with self.response)
def getdomain(self, dname, dtype='parameter'):
"""
Construct and process a GetDomain request
Parameters
----------
- dname: the value of the Parameter or Property to query
- dtype: whether to query a parameter (parameter) or property (property)
"""
# construct request
dtypename = 'ParameterName'
node0 = self._setrootelement('csw:GetDomain')
node0.set('service', self.service)
node0.set('version', self.version)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
if dtype == 'property':
dtypename = 'PropertyName'
etree.SubElement(node0, util.nspath_eval('csw:%s' % dtypename, namespaces)).text = dname
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
val = self._exml.find(util.nspath_eval('csw:DomainValues', namespaces)).attrib.get('type')
self.results['type'] = util.testXMLValue(val, True)
val = self._exml.find(util.nspath_eval('csw:DomainValues/csw:%s' % dtypename, namespaces))
self.results[dtype] = util.testXMLValue(val)
# get the list of values associated with the Domain
self.results['values'] = []
for f in self._exml.findall(util.nspath_eval('csw:DomainValues/csw:ListOfValues/csw:Value', namespaces)):
self.results['values'].append(util.testXMLValue(f))
def getrecords(self, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None, esn='summary', sortby=None, outputschema=namespaces['csw'], format=outputformat, startposition=0, maxrecords=10, cql=None, xml=None, resulttype='results'):
"""
Construct and process a GetRecords request
Parameters
----------
        - qtype: type of resource to query (e.g. service, dataset)
- keywords: list of keywords
- typenames: the typeNames to query against (default is csw:Record)
- propertyname: the PropertyName to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
- sortby: property to sort results on
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
- startposition: requests a slice of the result set, starting at this position (default is 0)
- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
- cql: common query language text. Note this overrides bbox, qtype, keywords
- xml: raw XML request. Note this overrides all other options
- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
"""
warnings.warn("""Please use the updated 'getrecords2' method instead of 'getrecords'.
The 'getrecords' method will be upgraded to use the 'getrecords2' parameters
in a future version of OWSLib.""")
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('resultType', resulttype)
node0.set('service', self.service)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)
if sortby is not None:
fes.setsortby(node1, sortby)
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
self.results['nextrecord'] = int(util.testXMLValue(val, True))
# process list of matching records
self.records = {}
self._parserecords(outputschema, esn)
def getrecordbyid(self, id=[], esn='full', outputschema=namespaces['csw'], format=outputformat):
"""
Construct and process a GetRecordById request
Parameters
----------
- id: the list of Ids
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'full')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
"""
# construct request
data = {
'service': self.service,
'version': self.version,
'request': 'GetRecordById',
'outputFormat': format,
'outputSchema': outputschema,
'elementsetname': esn,
'id': '',
}
self.request = '%s%s%s' % (bind_url(self.url), urlencode(data), ','.join(id))
self._invoke()
if self.exceptionreport is None:
self.results = {}
self.records = {}
self._parserecords(outputschema, esn)
def getrecords2(self, constraints=[], sortby=None, typenames='csw:Record', esn='summary', outputschema=namespaces['csw'], format=outputformat, startposition=0, maxrecords=10, cql=None, xml=None, resulttype='results'):
"""
Construct and process a GetRecords request
Parameters
----------
- constraints: the list of constraints (OgcExpression from owslib.fes module)
- sortby: an OGC SortBy object (SortBy from owslib.fes module)
- typenames: the typeNames to query against (default is csw:Record)
- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')
- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')
- format: the outputFormat (default is 'application/xml')
- startposition: requests a slice of the result set, starting at this position (default is 0)
- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)
- cql: common query language text. Note this overrides bbox, qtype, keywords
- xml: raw XML request. Note this overrides all other options
- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')
"""
if xml is not None:
self.request = etree.fromstring(xml)
val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))
if val is not None:
esn = util.testXMLValue(val)
else:
# construct request
node0 = self._setrootelement('csw:GetRecords')
if etree.__name__ != 'lxml.etree': # apply nsmap manually
node0.set('xmlns:ows', namespaces['ows'])
node0.set('xmlns:gmd', namespaces['gmd'])
node0.set('xmlns:dif', namespaces['dif'])
node0.set('xmlns:fgdc', namespaces['fgdc'])
node0.set('outputSchema', outputschema)
node0.set('outputFormat', format)
node0.set('version', self.version)
node0.set('service', self.service)
node0.set('resultType', resulttype)
if startposition > 0:
node0.set('startPosition', str(startposition))
node0.set('maxRecords', str(maxrecords))
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))
node1.set('typeNames', typenames)
etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn
if len(constraints) > 0:
node2 = etree.SubElement(node1, util.nspath_eval('csw:Constraint', namespaces))
node2.set('version', '1.1.0')
flt = fes.FilterRequest()
node2.append(flt.setConstraintList(constraints))
# Now add a CQL filter if passed in
if cql is not None:
etree.SubElement(node2, util.nspath_eval('csw:CqlText', namespaces)).text = cql
if sortby is not None and isinstance(sortby, fes.SortBy):
node1.append(sortby)
self.request = node0
self._invoke()
if self.exceptionreport is None:
self.results = {}
# process search results attributes
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')
self.results['matches'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')
self.results['returned'] = int(util.testXMLValue(val, True))
val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')
self.results['nextrecord'] = int(util.testXMLValue(val, True))
# process list of matching records
self.records = {}
self._parserecords(outputschema, esn)
def transaction(self, ttype=None, typename='csw:Record', record=None, propertyname=None, propertyvalue=None, bbox=None, keywords=[], cql=None, identifier=None):
"""
Construct and process a Transaction request
Parameters
----------
- ttype: the type of transaction 'insert, 'update', 'delete'
- typename: the typename to describe (default is 'csw:Record')
- record: the XML record to insert
- propertyname: the RecordProperty/PropertyName to Filter against
- propertyvalue: the RecordProperty Value to Filter against (for updates)
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- keywords: list of keywords
- cql: common query language text. Note this overrides bbox, qtype, keywords
- identifier: record identifier. Note this overrides bbox, qtype, keywords, cql
"""
# construct request
node0 = self._setrootelement('csw:Transaction')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
validtransactions = ['insert', 'update', 'delete']
if ttype not in validtransactions: # invalid transaction
raise RuntimeError('Invalid transaction \'%s\'.' % ttype)
node1 = etree.SubElement(node0, util.nspath_eval('csw:%s' % ttype.capitalize(), namespaces))
if ttype != 'update':
node1.set('typeName', typename)
if ttype == 'insert':
if record is None:
raise RuntimeError('Nothing to insert.')
node1.append(etree.fromstring(record))
if ttype == 'update':
if record is not None:
node1.append(etree.fromstring(record))
else:
if propertyname is not None and propertyvalue is not None:
node2 = etree.SubElement(node1, util.nspath_eval('csw:RecordProperty', namespaces))
etree.SubElement(node2, util.nspath_eval('csw:Name', namespaces)).text = propertyname
etree.SubElement(node2, util.nspath_eval('csw:Value', namespaces)).text = propertyvalue
                # no resource-type filter applies here, so pass None for qtype
                self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
if ttype == 'delete':
self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
self._parsetransactionsummary()
self._parseinsertresult()
def harvest(self, source, resourcetype, resourceformat=None, harvestinterval=None, responsehandler=None):
"""
Construct and process a Harvest request
Parameters
----------
- source: a URI to harvest
- resourcetype: namespace identifying the type of resource
- resourceformat: MIME type of the resource
- harvestinterval: frequency of harvesting, in ISO8601
        - responsehandler: endpoint to which the CSW should respond with its response
"""
# construct request
node0 = self._setrootelement('csw:Harvest')
node0.set('version', self.version)
node0.set('service', self.service)
node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
etree.SubElement(node0, util.nspath_eval('csw:Source', namespaces)).text = source
etree.SubElement(node0, util.nspath_eval('csw:ResourceType', namespaces)).text = resourcetype
if resourceformat is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResourceFormat', namespaces)).text = resourceformat
if harvestinterval is not None:
etree.SubElement(node0, util.nspath_eval('csw:HarvestInterval', namespaces)).text = harvestinterval
if responsehandler is not None:
etree.SubElement(node0, util.nspath_eval('csw:ResponseHandler', namespaces)).text = responsehandler
self.request = node0
self._invoke()
self.results = {}
if self.exceptionreport is None:
val = self._exml.find(util.nspath_eval('csw:Acknowledgement', namespaces))
if util.testXMLValue(val) is not None:
ts = val.attrib.get('timeStamp')
self.timestamp = util.testXMLValue(ts, True)
id = val.find(util.nspath_eval('csw:RequestId', namespaces))
self.id = util.testXMLValue(id)
else:
self._parsetransactionsummary()
self._parseinsertresult()
def getService_urls(self, service_string=None):
"""
Return easily identifiable URLs for all service types
Parameters
----------
- service_string: a URI to lookup
"""
urls=[]
for key,rec in self.records.items():
#create a generator object, and iterate through it until the match is found
#if not found, gets the default value (here "none")
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
def _parseinsertresult(self):
self.results['insertresults'] = []
for i in self._exml.findall(util.nspath_eval('csw:InsertResult', namespaces)):
for j in i.findall(util.nspath_eval('csw:BriefRecord/dc:identifier', namespaces)):
self.results['insertresults'].append(util.testXMLValue(j))
def _parserecords(self, outputschema, esn):
if outputschema == namespaces['gmd']: # iso 19139
for i in self._exml.findall('.//'+util.nspath_eval('gmd:MD_Metadata', namespaces)):
val = i.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = MD_Metadata(i)
elif outputschema == namespaces['fgdc']: # fgdc csdgm
for i in self._exml.findall('.//metadata'):
val = i.find('idinfo/datasetid')
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = Metadata(i)
elif outputschema == namespaces['dif']: # nasa dif
for i in self._exml.findall('.//'+util.nspath_eval('dif:DIF', namespaces)):
val = i.find(util.nspath_eval('dif:Entry_ID', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = DIF(i)
else: # process default
for i in self._exml.findall('.//'+util.nspath_eval('csw:%s' % self._setesnel(esn), namespaces)):
val = i.find(util.nspath_eval('dc:identifier', namespaces))
identifier = self._setidentifierkey(util.testXMLValue(val))
self.records[identifier] = CswRecord(i)
def _parsetransactionsummary(self):
val = self._exml.find(util.nspath_eval('csw:TransactionSummary', namespaces))
if val is not None:
rid = val.attrib.get('requestId')
self.results['requestid'] = util.testXMLValue(rid, True)
ts = val.find(util.nspath_eval('csw:totalInserted', namespaces))
self.results['inserted'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalUpdated', namespaces))
self.results['updated'] = int(util.testXMLValue(ts))
ts = val.find(util.nspath_eval('csw:totalDeleted', namespaces))
self.results['deleted'] = int(util.testXMLValue(ts))
def _setesnel(self, esn):
""" Set the element name to parse depending on the ElementSetName requested """
el = 'Record'
if esn == 'brief':
el = 'BriefRecord'
if esn == 'summary':
el = 'SummaryRecord'
return el
def _setidentifierkey(self, el):
if el is None:
return 'owslib_random_%i' % random.randint(1,65536)
else:
return el
def _setrootelement(self, el):
if etree.__name__ == 'lxml.etree': # apply nsmap
return etree.Element(util.nspath_eval(el, namespaces), nsmap=namespaces)
else:
return etree.Element(util.nspath_eval(el, namespaces))
def _setconstraint(self, parent, qtype=None, propertyname='csw:AnyText', keywords=[], bbox=None, cql=None, identifier=None):
if keywords or bbox is not None or qtype is not None or cql is not None or identifier is not None:
node0 = etree.SubElement(parent, util.nspath_eval('csw:Constraint', namespaces))
node0.set('version', '1.1.0')
if identifier is not None: # set identifier filter, overrides all other parameters
flt = fes.FilterRequest()
node0.append(flt.set(identifier=identifier))
elif cql is not None: # send raw CQL query
# CQL passed, overrides all other parameters
node1 = etree.SubElement(node0, util.nspath_eval('csw:CqlText', namespaces))
node1.text = cql
else: # construct a Filter request
flt = fes.FilterRequest()
node0.append(flt.set(qtype=qtype, keywords=keywords, propertyname=propertyname,bbox=bbox))
def _invoke(self):
# do HTTP request
if isinstance(self.request, str): # GET KVP
self.response = urlopen(self.request, timeout=self.timeout).read()
else:
self.request = cleanup_namespaces(self.request)
self.request = util.xml2string(etree.tostring(self.request))
self.response = util.http_post(self.url, self.request, self.lang, self.timeout)
        # parse the result to see if it's XML
self._exml = etree.parse(io.StringIO(self.response))
        # it's XML. Attempt to decipher whether the XML response is CSW-ish
valid_xpaths = [
util.nspath_eval('ows:ExceptionReport', namespaces),
util.nspath_eval('csw:Capabilities', namespaces),
util.nspath_eval('csw:DescribeRecordResponse', namespaces),
util.nspath_eval('csw:GetDomainResponse', namespaces),
util.nspath_eval('csw:GetRecordsResponse', namespaces),
util.nspath_eval('csw:GetRecordByIdResponse', namespaces),
util.nspath_eval('csw:HarvestResponse', namespaces),
util.nspath_eval('csw:TransactionResponse', namespaces)
]
if self._exml.getroot().tag not in valid_xpaths:
raise RuntimeError('Document is XML, but not CSW-ish')
# check if it's an OGC Exception
val = self._exml.find(util.nspath_eval('ows:Exception', namespaces))
if val is not None:
raise ows.ExceptionReport(self._exml, self.owscommon.namespace)
else:
self.exceptionreport = None
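# Illustrative usage sketch (not part of OWSLib itself). The endpoint URL is
# hypothetical, and it assumes owslib.fes exposes PropertyIsLike, as in
# contemporary OWSLib releases.
def _example_csw_query():
    from owslib.fes import PropertyIsLike
    csw = CatalogueServiceWeb('http://example.org/csw', timeout=30)
    query = PropertyIsLike('csw:AnyText', '%elevation%')
    csw.getrecords2(constraints=[query], esn='summary', maxrecords=5)
    # results holds 'matches', 'returned' and 'nextrecord'; records maps
    # record identifiers to parsed record objects.
    return csw.results, csw.records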
class CswRecord(object):
""" Process csw:Record, csw:BriefRecord, csw:SummaryRecord """
def __init__(self, record):
if hasattr(record, 'getroot'): # standalone document
self.xml = etree.tostring(record.getroot())
else: # part of a larger document
self.xml = etree.tostring(record)
# check to see if Dublin Core record comes from
# rdf:RDF/rdf:Description container
# (child content model is identical)
self.rdf = False
rdf = record.find(util.nspath_eval('rdf:Description', namespaces))
if rdf is not None:
self.rdf = True
record = rdf
# some CSWs return records with multiple identifiers based on
# different schemes. Use the first dc:identifier value to set
# self.identifier, and set self.identifiers as a list of dicts
val = record.find(util.nspath_eval('dc:identifier', namespaces))
self.identifier = util.testXMLValue(val)
self.identifiers = []
for i in record.findall(util.nspath_eval('dc:identifier', namespaces)):
d = {}
d['scheme'] = i.attrib.get('scheme')
d['identifier'] = i.text
self.identifiers.append(d)
val = record.find(util.nspath_eval('dc:type', namespaces))
self.type = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:title', namespaces))
self.title = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:alternative', namespaces))
self.alternative = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:isPartOf', namespaces))
self.ispartof = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:abstract', namespaces))
self.abstract = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:date', namespaces))
self.date = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:created', namespaces))
self.created = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:issued', namespaces))
self.issued = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:relation', namespaces))
self.relation = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:temporal', namespaces))
self.temporal = util.testXMLValue(val)
self.uris = [] # list of dicts
for i in record.findall(util.nspath_eval('dc:URI', namespaces)):
uri = {}
uri['protocol'] = util.testXMLValue(i.attrib.get('protocol'), True)
uri['name'] = util.testXMLValue(i.attrib.get('name'), True)
uri['description'] = util.testXMLValue(i.attrib.get('description'), True)
uri['url'] = util.testXMLValue(i)
self.uris.append(uri)
self.references = [] # list of dicts
for i in record.findall(util.nspath_eval('dct:references', namespaces)):
ref = {}
ref['scheme'] = util.testXMLValue(i.attrib.get('scheme'), True)
ref['url'] = util.testXMLValue(i)
self.references.append(ref)
val = record.find(util.nspath_eval('dct:modified', namespaces))
self.modified = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:creator', namespaces))
self.creator = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:publisher', namespaces))
self.publisher = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:coverage', namespaces))
self.coverage = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:contributor', namespaces))
self.contributor = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:language', namespaces))
self.language = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:source', namespaces))
self.source = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:rightsHolder', namespaces))
self.rightsholder = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:accessRights', namespaces))
self.accessrights = util.testXMLValue(val)
val = record.find(util.nspath_eval('dct:license', namespaces))
self.license = util.testXMLValue(val)
val = record.find(util.nspath_eval('dc:format', namespaces))
self.format = util.testXMLValue(val)
self.subjects = []
for i in record.findall(util.nspath_eval('dc:subject', namespaces)):
self.subjects.append(util.testXMLValue(i))
self.rights = []
for i in record.findall(util.nspath_eval('dc:rights', namespaces)):
self.rights.append(util.testXMLValue(i))
val = record.find(util.nspath_eval('dct:spatial', namespaces))
self.spatial = util.testXMLValue(val)
val = record.find(util.nspath_eval('ows:BoundingBox', namespaces))
if val is not None:
self.bbox = ows.BoundingBox(val, namespaces['ows'])
else:
self.bbox = None
val = record.find(util.nspath_eval('ows:WGS84BoundingBox', namespaces))
if val is not None:
self.bbox_wgs84 = ows.WGS84BoundingBox(val, namespaces['ows'])
else:
self.bbox_wgs84 = None
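# Illustrative sketch (not part of OWSLib itself): how parsed records are typically
# inspected after a GetRecords request. The csw argument is assumed to be a
# CatalogueServiceWeb instance on which getrecords2() has already been called.
def _example_inspect_records(csw):
    summaries = []
    for identifier, rec in csw.records.items():
        # rec.identifier holds the first dc:identifier value, while rec.identifiers
        # keeps every (scheme, identifier) pair as a list of dicts.
        summaries.append((identifier, rec.title, rec.type, len(rec.identifiers)))
    return summaries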
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import sys
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import network_utils
from cloudbaseinit.openstack.common.rpc import amqp as rpc_amqp
from cloudbaseinit.openstack.common.rpc import common as rpc_common
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
help='SSL key file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password'),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
cfg.BoolOpt('rabbit_durable_queues',
default=False,
help='use durable queues in RabbitMQ'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
help='use H/A queues in RabbitMQ (x-ha-policy: all).'
'You need to wipe RabbitMQ database when '
'changing this option.'),
]
cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
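# For reference (hypothetical conf values): with rabbit_ha_queues=True the helper
# above returns {'x-ha-policy': 'all'}, so every declared queue is mirrored across
# the cluster; with the default of False it returns {} and no mirroring arguments
# are attached to the queue declaration.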
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, channel, callback, tag, **kwargs):
"""Declare a queue on an amqp channel.
'channel' is the amqp channel to use
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
queue name, exchange name, and other kombu options are
passed in here as a dictionary.
"""
self.callback = callback
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
self.reconnect(channel)
def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect"""
self.channel = channel
self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.iterconsume() iterator will process the messages,
calling the appropriate callback.
If a callback is specified in kwargs, use that. Otherwise,
use the callback passed during __init__()
        If kwargs['nowait'] is True, the consumer is declared without
        waiting for the broker to confirm the registration.
Messages will automatically be acked if the callback doesn't
raise an exception
"""
options = {'consumer_tag': self.tag}
options['nowait'] = kwargs.get('nowait', False)
callback = kwargs.get('callback', self.callback)
if not callback:
raise ValueError("No callback defined")
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
callback(message.payload)
message.ack()
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
self.queue.consume(*args, callback=_callback, **options)
def cancel(self):
"""Cancel the consuming from the queue, if it has started"""
try:
self.queue.cancel(self.tag)
except KeyError, e:
# NOTE(comstud): Kludge to get around a amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'"""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue.
'channel' is the amqp channel to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
# Default options
options = {'durable': False,
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(DirectConsumer, self).__init__(channel,
callback,
tag,
name=msg_id,
exchange=exchange,
routing_key=msg_id,
**options)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
:param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param tag: a unique ID for the consumer on the channel
:param name: optional queue name, defaults to topic
:paramtype name: str
Other kombu options may be passed as keyword arguments
"""
# Default options
options = {'durable': conf.rabbit_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(TopicConsumer, self).__init__(channel,
callback,
tag,
name=name or topic,
exchange=exchange,
routing_key=topic,
**options)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'"""
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue.
'channel' is the amqp channel to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(FanoutConsumer, self).__init__(channel, callback, tag,
name=queue_name,
exchange=exchange,
routing_key=topic,
**options)
class Publisher(object):
"""Base Publisher class"""
def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.exchange_name = exchange_name
self.routing_key = routing_key
self.kwargs = kwargs
self.reconnect(channel)
def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection"""
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange,
channel=channel,
routing_key=self.routing_key)
def send(self, msg):
"""Send a message"""
self.producer.publish(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'"""
def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'"""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': conf.rabbit_durable_queues,
'auto_delete': False,
'exclusive': False}
options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'"""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'"""
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
super(NotifyPublisher, self).reconnect(channel)
# NOTE(jerdfelt): Normally the consumer would create the queue, but
# we do this to ensure that messages don't get dropped if the
# consumer is started after we do
queue = kombu.entity.Queue(channel=channel,
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
self.interval_start = self.conf.rabbit_retry_interval
self.interval_stepping = self.conf.rabbit_retry_backoff
# max retry-interval = 30 seconds
self.interval_max = 30
self.memory_transport = False
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
params_list.append(params)
self.params_list = params_list
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
def _fetch_ssl_params(self):
"""Handles fetching what ssl params
should be used for the connection (if any)"""
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = self.conf.kombu_ssl_version
if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile:
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
if self.conf.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
if not ssl_params:
# Just have the default behavior
return True
else:
# Return the extended behavior
return ssl_params
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.close()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
self.consumer_num = itertools.count(1)
self.connection.connect()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
Will retry up to self.max_retries number of times.
self.max_retries = 0 means to retry forever.
Sleep between tries, starting at self.interval_start
seconds, backing off self.interval_stepping number of seconds
each attempt.
"""
attempt = 0
while True:
params = self.params_list[attempt % len(self.params_list)]
attempt += 1
try:
self._connect(params)
return
except (IOError, self.connection_errors) as e:
pass
except Exception, e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
LOG.error(_('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info)
# NOTE(comstud): Copied from original code. There's
# really no better recourse because if this was a queue we
# need to consume on, we have no way to consume anymore.
sys.exit(1)
if attempt == 1:
sleep_time = self.interval_start or 1
elif attempt > 1:
sleep_time += self.interval_stepping
if self.interval_max:
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError), e:
if error_callback:
error_callback(e)
except Exception, e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues"""
return self.channel
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
self.consumers = []
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next())
self.consumers.append(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.exception(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1]
queues_tail = self.consumers[-1]
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread"""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def publisher_send(self, cls, topic, msg, **kwargs):
"""Send to a publisher based on the publisher class"""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg)
self.ensure(_error_callback, _publish)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer"""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread"""
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
else:
self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def create_connection(conf, new=True):
"""Create a connection"""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
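# Illustrative sketch (not part of the original module): the usual way these
# helpers are driven. The topic name and message body are hypothetical and assume
# the conventional OpenStack RPC envelope of {'method': ..., 'args': {...}}.
def _example_rpc_usage(conf, context):
    # Fire-and-forget message to whichever worker consumes the 'scheduler' topic.
    cast(conf, context, 'scheduler', {'method': 'ping', 'args': {}})
    # Blocking call that waits up to five seconds for a single response.
    return call(conf, context, 'scheduler', {'method': 'ping', 'args': {}}, timeout=5)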
|
|
# -*- coding: utf-8 -*-
import urllib
import re
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from cms.exceptions import NoHomeFound
from cms.models.pagemodel import Page
from cms.utils.urlutils import any_path_re
ADMIN_PAGE_RE_PATTERN = ur'cms/page/(\d+)'
ADMIN_PAGE_RE = re.compile(ADMIN_PAGE_RE_PATTERN)
def use_draft(request):
"""
    Decision function to determine whether draft or public pages should be used.
Public models are used unless looking at preview or edit versions of the page.
"""
preview_draft = 'preview' in request.GET and 'draft' in request.GET
edit_mode = 'edit' in request.GET
return preview_draft or edit_mode
def get_page_queryset(request=None):
if request and use_draft(request):
return Page.objects.drafts()
return Page.objects.public()
def get_page_queryset_from_path(path, preview=False, draft=False, site=None):
""" Returns a queryset of pages corresponding to the path given
    It may return None or a single Page if no page is present or the root path is given.
"""
if 'django.contrib.admin' in settings.INSTALLED_APPS:
admin_base = reverse('admin:index')
else:
admin_base = None
# Check if this is called from an admin request
if admin_base and path.startswith(admin_base):
# if so, get the page ID to request it directly
match = ADMIN_PAGE_RE.search(path)
if not match:
page = None
else:
try:
page = Page.objects.get(pk=match.group(1))
except Page.DoesNotExist:
page = None
return page
if not site:
site = Site.objects.get_current()
# PageQuerySet.published() already filters on the page site;
# we only have to filter on site explicitly in draft/preview mode
if draft:
pages = Page.objects.drafts().filter(site=site)
elif preview:
pages = Page.objects.public().filter(site=site)
else:
pages = Page.objects.public().published(site)
# Check if there are any pages
if not pages.all_root().exists():
return None
# get the home page (needed to get the page)
try:
home = pages.get_home(site=site)
except NoHomeFound:
home = None
# if there is no path (slashes stripped) and we found a home, this is the
# home page.
if not path and home:
page = home
return page
# title_set__path=path should be clear, get the pages where the path of the
# title object is equal to our path.
return pages.filter(title_set__path=path).distinct()
def get_page_from_path(path, preview=False, draft=False):
""" Resolves a url path to a single page object.
Raises exceptions is page does not exist or multiple pages are found
"""
page_qs = get_page_queryset_from_path(path, preview, draft)
if page_qs is not None:
if isinstance(page_qs, Page):
return page_qs
try:
page = page_qs.get()
except Page.DoesNotExist:
return None
return page
else:
return None
def get_page_from_request(request, use_path=None):
"""
Gets the current page from a request object.
URLs can be of the following form (this should help understand the code):
http://server.whatever.com/<some_path>/"pages-root"/some/page/slug
<some_path>: This can be anything, and should be stripped when resolving
page names. This means the CMS is not installed at the root of the
server's URLs.
"pages-root" This is the root of Django urls for the CMS. It is, in essence
an empty page slug (slug == '')
The page slug can then be resolved to a Page model object
"""
# The following is used by cms.middleware.page.CurrentPageMiddleware
if hasattr(request, '_current_page_cache'):
return request._current_page_cache
draft = use_draft(request)
preview = 'preview' in request.GET
# For an unauthenticated user, any request for a draft (preview/edit) results
# in a "not found". This is to avoid confusing the toolbar logic into thinking
# it has an editable version.
if draft and not request.user.is_authenticated():
return None
# If use_path is given, someone already did the path cleaning
if use_path is not None:
path = use_path
else:
path = request.path
pages_root = urllib.unquote(reverse("pages-root"))
# otherwise strip off the non-cms part of the URL
if 'django.contrib.admin' in settings.INSTALLED_APPS:
admin_base = reverse('admin:index')
else:
admin_base = None
if path.startswith(pages_root) and (not admin_base or not path.startswith(admin_base)):
path = path[len(pages_root):]
# and strip any final slash
if path.endswith("/"):
path = path[:-1]
page = get_page_from_path(path, preview, draft)
if draft and page and not page.has_change_permission(request):
return None
request._current_page_cache = page
return page
def is_valid_url(url, instance, create_links=True, site=None):
""" Checks for conflicting urls
"""
page_root = urllib.unquote(reverse("pages-root"))
if url and url != page_root:
# Url sanity check via regexp
if not any_path_re.match(url):
raise ValidationError(_('Invalid URL, use /my/url format.'))
# We only check page FK to site object to allow is_valid_url check on
# incomplete Page instances
if not site and instance.site_id:
site = instance.site
# Retrieve complete queryset of pages with corresponding URL
# This uses the same resolving function as ``get_page_from_path``
if url.startswith(page_root):
url = url[len(page_root):]
page_qs = get_page_queryset_from_path(url.strip('/'), site=site)
url_clashes = []
# If queryset has pages checks for conflicting urls
if page_qs is not None:
# If single page is returned create a list for interface compat
if isinstance(page_qs, Page):
page_qs = [page_qs]
for page in page_qs:
# Every page in the queryset except the current one is a conflicting page
# We have to exclude both copies of the page
if page and page.publisher_public.pk != instance.pk:
if create_links:
# Format return message with page url
url_clashes.append('<a href="%(page_url)s%(pk)s" target="_blank">%(page_title)s</a>' % {
'page_url': reverse('admin:cms_page_changelist'), 'pk': page.pk,
'page_title': force_unicode(page),
})
else:
# Just return the page name
url_clashes.append("'%s'" % page)
if url_clashes:
# If clashing pages exist raise the exception
raise ValidationError(mark_safe(
ungettext_lazy('Page %(pages)s has the same url \'%(url)s\' as current page "%(instance)s".',
'Pages %(pages)s have the same url \'%(url)s\' as current page "%(instance)s".',
len(url_clashes)) %
{'pages': ', '.join(url_clashes), 'url': url, 'instance': instance}))
return True
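# Hedged usage sketch (caller and variable names are illustrative assumptions):
# a page form or model clean() can validate a prospective URL before saving:
#
#   try:
#       is_valid_url(new_url, page_instance)
#   except ValidationError:
#       pass  # surface the clash message to the user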
|
|
# -*- coding: utf-8 -*-
from collections import deque
from nereid import render_template, route
from nereid.globals import session, request, current_app
from nereid.helpers import slugify, url_for
from nereid import jsonify, Markup, current_locale
from nereid.contrib.pagination import Pagination
from nereid.contrib.sitemap import SitemapIndex, SitemapSection
from werkzeug.exceptions import NotFound
from flask.ext.babel import format_currency
from trytond.model import ModelSQL, ModelView, fields
from trytond.pyson import Eval, Not, Bool
from trytond.pool import Pool, PoolMeta
from sql import Null
__all__ = [
'Product', 'ProductsRelated', 'ProductTemplate',
'ProductMedia', 'ProductCategory'
]
DEFAULT_STATE = {'invisible': Not(Bool(Eval('displayed_on_eshop')))}
DEFAULT_STATE2 = {
'invisible': Not(Bool(Eval('displayed_on_eshop'))),
'required': Bool(Eval('displayed_on_eshop')),
}
class ProductMedia(ModelSQL, ModelView):
"Product Media"
__name__ = "product.media"
sequence = fields.Integer("Sequence", required=True, select=True)
static_file = fields.Many2One(
"nereid.static.file", "Static File", required=True, select=True)
product = fields.Many2One("product.product", "Product", select=True)
template = fields.Many2One("product.template", "Template", select=True)
url = fields.Function(fields.Char("URL"), "get_url")
def get_url(self, name):
return self.static_file.url
@classmethod
def __setup__(cls):
super(ProductMedia, cls).__setup__()
cls._order.insert(0, ('sequence', 'ASC'))
@staticmethod
def default_sequence():
return 10
class ProductTemplate:
__metaclass__ = PoolMeta
__name__ = "product.template"
products_displayed_on_eshop = fields.Function(
fields.One2Many('product.product', None, 'Products (Disp. on eShop)'),
'get_products_displayed_on_eshop'
)
long_description = fields.Text('Long Description')
description = fields.Text("Description")
media = fields.One2Many("product.media", "template", "Media")
images = fields.Function(
fields.One2Many('nereid.static.file', None, 'Images'),
getter='get_template_images'
)
def get_template_images(self, name=None):
"""
Getter for `images` function field
"""
template_images = []
for media in self.media:
if media.static_file.mimetype and \
'image' in media.static_file.mimetype:
template_images.append(media.static_file.id)
return template_images
def get_products_displayed_on_eshop(self, name=None):
"""
Return the variants that are displayed on eshop
"""
Product = Pool().get('product.product')
return map(
int,
Product.search([
('template', '=', self.id),
('displayed_on_eshop', '=', True),
])
)
class Product:
"Product extension for Nereid"
__metaclass__ = PoolMeta
__name__ = "product.product"
#: Decides the number of products that would be remembered.
recent_list_size = 5
#: The list of fields allowed to be sent back on a JSON response from the
#: application. This is validated before any product info is built
#:
#: The `name`, `sale_price`, `id` and `uri` are sent by default
#:
#: .. versionadded:: 0.3
json_allowed_fields = set(['rec_name', 'sale_price', 'id', 'uri'])
uri = fields.Char(
'URI', select=True, states=DEFAULT_STATE2
)
displayed_on_eshop = fields.Boolean('Displayed on E-Shop?', select=True)
long_description = fields.Text('Long Description')
media = fields.One2Many("product.media", "product", "Media")
images = fields.Function(
fields.One2Many('nereid.static.file', None, 'Images'),
getter='get_product_images'
)
up_sells = fields.Many2Many(
'product.product-product.product',
'product', 'up_sell', 'Up-Sells', states=DEFAULT_STATE
)
cross_sells = fields.Many2Many(
'product.product-product.product',
'product', 'cross_sell', 'Cross-Sells', states=DEFAULT_STATE
)
default_image = fields.Function(
fields.Many2One('nereid.static.file', 'Image'), 'get_default_image',
)
use_template_description = fields.Boolean("Use template's description")
@classmethod
def view_attributes(cls):
return super(Product, cls).view_attributes() + [
('//page[@id="desc"]', 'states', {
'invisible': Bool(Eval('use_template_description'))
}), ('//page[@id="ecomm_det"]', 'states', {
'invisible': Not(Bool(Eval('displayed_on_eshop')))
}), ('//page[@id="related_products"]', 'states', {
'invisible': Not(Bool(Eval('displayed_on_eshop')))
})]
@classmethod
def copy(cls, products, default=None):
"""Duplicate products
"""
if default is None:
default = {}
default = default.copy()
default['displayed_on_eshop'] = False
duplicate_products = []
for index, product in enumerate(products, start=1):
if product.uri:
default['uri'] = "%s-copy-%d" % (product.uri, index)
duplicate_products.extend(
super(Product, cls).copy([product], default)
)
return duplicate_products
@classmethod
def validate(cls, products):
super(Product, cls).validate(products)
cls.check_uri_uniqueness(products)
def get_default_image(self, name):
"""
Returns default product image if any.
"""
images = self.images or self.template.images
return images[0].id if images else None
@classmethod
def __setup__(cls):
super(Product, cls).__setup__()
cls.description.states['invisible'] = Bool(
Eval('use_template_description')
)
cls._error_messages.update({
'unique_uri': ('URI of Product must be Unique'),
})
cls.per_page = 12
@staticmethod
def default_displayed_on_eshop():
return False
@fields.depends('template', 'uri')
def on_change_with_uri(self):
"""
If the URI is empty, slugify template name into URI
"""
if not self.uri and self.template:
return slugify(self.template.name)
return self.uri
@staticmethod
def default_use_template_description():
return True
@classmethod
def check_uri_uniqueness(cls, products):
"""
Ensure uniqueness of products uri.
"""
query = ['OR']
for product in products:
# Do not check for unique uri if product is marked as
# not displayed on eshop
if not product.displayed_on_eshop:
continue
arg = [
'AND', [
('id', '!=', product.id)
], [
('uri', 'ilike', product.uri)
]
]
query.append(arg)
if query != ['OR'] and cls.search(query):
cls.raise_user_error('unique_uri')
@classmethod
@route('/product/<uri>')
@route('/product/<path:path>/<uri>')
def render(cls, uri, path=None):
"""Renders the template for a single product.
:param uri: URI of the product
:param path: Ignored parameter. This is used in
cases where SEO friendly URL like
product/category/sub-cat/sub-sub-cat/product-uri
are generated
"""
products = cls.search([
('displayed_on_eshop', '=', True),
('uri', '=', uri),
('template.active', '=', True),
], limit=1)
if not products:
return NotFound('Product Not Found')
cls._add_to_recent_list(int(products[0]))
return render_template('product.jinja', product=products[0])
@classmethod
@route('/products/+recent', methods=['GET', 'POST'])
def recent_products(cls):
"""
GET
---
Return a list of recently visited products in JSON
POST
----
Add the product to the recent list manually. This method is required
if the product page is cached, or is served by a Caching Middleware
like Varnish which may clear the session before sending the request to
Nereid.
Just as with GET, the response is the JSON list of recent products
"""
if request.method == 'POST':
cls._add_to_recent_list(request.form.get('product_id', type=int))
fields = set(request.args.getlist('fields')) or cls.json_allowed_fields
fields = fields & cls.json_allowed_fields
if 'sale_price' in fields:
fields.remove('sale_price')
response = []
if hasattr(session, 'sid'):
products = cls.browse(session.get('recent-products', []))
for product in products:
product_val = {}
for field in fields:
product_val[field] = getattr(product, field)
product_val['sale_price'] = format_currency(
product.sale_price(),
current_locale.currency.code
)
response.append(product_val)
return jsonify(products=response)
@classmethod
def _add_to_recent_list(cls, product_id):
"""Adds the given product ID to the list of recently viewed products
By default the list size is 5. To change this you can inherit
product.product and set :attr:`recent_list_size` attribute to a
non negative integer value
For faster and easier access the products are stored with the ids alone
this behaviour can be modified by subclassing.
The deque object cannot be saved directly in the cache as its not
serialisable. Hence a conversion to list is made on the fly
.. versionchanged:: 0.3
If there is no session for the user this function returns an empty
list. This ensures that the code is consistent with iterators that
may use the returned value
:param product_id: the product id to prepend to the list
"""
if not hasattr(session, 'sid'):
current_app.logger.warning(
"No session. Not saving to browsing history"
)
return []
recent_products = deque(
session.setdefault('recent-products', []), cls.recent_list_size
)
# XXX: If a product is already in the recently viewed list it is not moved
# to the front; it would be nice to keep the recent_products list ordered
# by most recent visit.
if product_id not in recent_products:
recent_products.appendleft(product_id)
session['recent-products'] = list(recent_products)
return recent_products
@classmethod
@route('/products')
@route('/products/<int:page>')
def render_list(cls, page=1):
"""
Renders the list of all products which have displayed_on_eshop=True
.. tip::
The implementation uses offset for pagination and could be
extremely resource intensive on databases. Hence you might want to
either have an alternate cache/search server based pagination or
limit the pagination to a maximum page number.
The base implementation does NOT limit this and could hence result
in poor performance
:param page: The page in pagination to be displayed
"""
products = Pagination(cls, [
('displayed_on_eshop', '=', True),
('template.active', '=', True),
], page, cls.per_page)
return render_template('product-list.jinja', products=products)
def sale_price(self, quantity=0):
"""Return the Sales Price.
A wrapper designed to work as a context variable in templating
The price is calculated from the pricelist associated with the current
user. The user in the case of guest user is logged in user. In the
event that the logged in user does not have a pricelist set against
the user, the guest user's pricelist is chosen.
Finally if neither the guest user, nor the regsitered user has a
pricelist set against them then the list price is displayed as the
price of the product
:param quantity: Quantity
"""
return self.list_price
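# Hedged override sketch (the downstream module and helper are assumptions,
# not part of this file): a custom module can implement the pricelist
# behaviour described in the docstring by extending product.product, e.g.
#
#   class Product:
#       __metaclass__ = PoolMeta
#       __name__ = 'product.product'
#
#       def sale_price(self, quantity=0):
#           price = self._price_from_user_pricelist(quantity)  # hypothetical helper
#           if price is not None:
#               return price
#           return super(Product, self).sale_price(quantity)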
@classmethod
@route('/sitemaps/product-index.xml')
def sitemap_index(cls):
"""
Returns a Sitemap Index Page
"""
index = SitemapIndex(cls, [
('displayed_on_eshop', '=', True),
('template.active', '=', True),
])
return index.render()
@classmethod
@route('/sitemaps/product-<int:page>.xml')
def sitemap(cls, page):
sitemap_section = SitemapSection(
cls, [
('displayed_on_eshop', '=', True),
('template.active', '=', True),
], page
)
sitemap_section.changefreq = 'daily'
return sitemap_section.render()
def get_absolute_url(self, **kwargs):
"""
Return the URL of the current product.
This method works only under a nereid request context
"""
return url_for('product.product.render', uri=self.uri, **kwargs)
def _json(self):
"""
Return a JSON serializable dictionary of the product
"""
response = {
'template': {
'name': self.template.rec_name,
'id': self.template.id,
'list_price': self.list_price,
},
'code': self.code,
'description': self.description,
}
return response
def get_long_description(self):
"""
Get long description of product.
If the product is set to use the template's long description, then
the template long description is sent back.
The returned value is a `~jinja2.Markup` object which makes it
HTML safe and can be used directly in templates. It is recommended
to use this method instead of trying to wrap this logic in the
templates.
"""
if self.use_template_description:
description = self.template.long_description
else:
description = self.long_description
return Markup(description or '')
def get_description(self):
"""
Get description of product.
If the product is set to use the template's description, then
the template description is sent back.
The returned value is a `~jinja2.Markup` object which makes it
HTML safe and can be used directly in templates. It is recommended
to use this method instead of trying to wrap this logic in the
templates.
"""
if self.use_template_description:
description = self.template.description
else:
description = self.description
return Markup(description or '')
def get_product_images(self, name=None):
"""
Getter for `images` function field
"""
product_images = []
for media in self.media:
if not media.static_file.mimetype:
continue
if 'image' in media.static_file.mimetype:
product_images.append(media.static_file.id)
return product_images
def get_images(self):
"""
Get images of product variant.
Fallback to template's images if there are no images
for product.
"""
if self.images:
return self.images
return self.template.images
class ProductsRelated(ModelSQL):
"Related Product"
__name__ = 'product.product-product.product'
_table = 'product_product_rel'
product = fields.Many2One(
'product.product', 'Product',
ondelete='CASCADE', select=True, required=True)
up_sell = fields.Many2One(
'product.product', 'Up-sell Product',
ondelete='CASCADE', select=True)
cross_sell = fields.Many2One(
'product.product', 'Cross-sell Product',
ondelete='CASCADE', select=True)
class ProductCategory:
__metaclass__ = PoolMeta
__name__ = 'product.category'
@staticmethod
def order_rec_name(tables):
table, _ = tables[None]
return [table.parent == Null, table.parent, table.name]
@classmethod
def __setup__(cls):
super(ProductCategory, cls).__setup__()
cls.rec_name.string = "Parent/name"
|
|
#!/usr/bin/env python
from __future__ import division
import argparse
import cPickle as pickle
import codecs
import collections
import logging
import os
import random
import sys
import time
from sklearn import svm, tree
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from config.corpus_paths import paths
from config import config
from reader.Transmir_corpus import get_transmir_gold_ann_set
from reader.bc2gm_corpus import get_b2gm_gold_ann_set
from reader.brat_corpus import get_brat_gold_ann_set
from reader.chemdner_corpus import get_chemdner_gold_ann_set, run_chemdner_evaluation, write_chemdner_files
from reader.genia_corpus import get_genia_gold_ann_set
from reader.jnlpba_corpus import get_jnlpba_gold_ann_set
from reader.mirna_corpus import get_ddi_mirna_gold_ann_set
from reader.mirtext_corpus import get_mirtex_gold_ann_set
from reader.seedev_corpus import get_seedev_gold_ann_set
from reader.tempEval_corpus import get_thymedata_gold_ann_set, write_tempeval_results, run_anafora_evaluation
if config.use_chebi:
from postprocessing import chebi_resolution
from postprocessing.ssm import get_ssm
from postprocessing.ensemble_ner import EnsembleNER
from classification.results import ResultsNER
def get_gold_ann_set(corpus_type, gold_path, entity_type, pair_type, text_path):
if corpus_type == "chemdner":
goldset = get_chemdner_gold_ann_set(gold_path, entity_type, text_path, corpus_type)
elif corpus_type == "tempeval":
goldset = get_thymedata_gold_ann_set(gold_path, entity_type, text_path, corpus_type)
elif corpus_type == "pubmed":
goldset = get_unique_gold_ann_set(gold_path)
elif corpus_type == "genia":
goldset = get_genia_gold_ann_set(gold_path, entity_type)
elif corpus_type == "ddi-mirna":
goldset = get_ddi_mirna_gold_ann_set(gold_path, entity_type, pair_type)
elif corpus_type == "mirtex":
goldset = get_mirtex_gold_ann_set(gold_path, entity_type, pair_type)
elif corpus_type == "seedev":
goldset = get_seedev_gold_ann_set(gold_path, entity_type, pair_type)
elif corpus_type == "jnlpba":
goldset = get_jnlpba_gold_ann_set(gold_path, entity_type)
elif corpus_type == "bc2":
goldset = get_b2gm_gold_ann_set(gold_path, text_path)
elif corpus_type == "transmir":
goldset = get_transmir_gold_ann_set(gold_path, entity_type)
elif corpus_type == "brat":
goldset = get_brat_gold_ann_set(gold_path, entity_type, pair_type)
return goldset
def get_unique_gold_ann_set(goldann):
"""
Load a gold standard consisting of a list of unique entities
:param goldann: path to annotation
:return: Set of gold standard annotations
"""
with codecs.open(goldann, 'r', 'utf-8') as goldfile:
gold = [line.strip() for line in goldfile if line.strip()]
return gold, None
def compare_results(offsets, goldoffsets, corpus, getwords=True, evaltype="entity", entities=[]):
"""
Compare system results with a gold standard, works for both NER and RE
:param offsets: system results dictionary, offset tuples (did, start, end, text): more info
:param goldoffsets: dictionary with the gold standard annotations (did, start, end [, text]): more info
:param corpus: Reference corpus
:return: Lines to write into a report file, plus the sets of TPs, FPs and FNs
"""
#TODO: check if size of offsets and goldoffsets tuples is the same
report = []
#if not getwords:
# offsets = set([x[:4] for x in offsets.keys()])
if type(goldoffsets) is set:
goldoffsets = {s: [] for s in goldoffsets}
# goldoffsets = set([x[:4] for x in goldoffsets.keys()])
# print len(goldoffsets), len(offsets)
if len(entities) > 0:
goldoffsets_keys = goldoffsets.keys()
for k in goldoffsets_keys:
if k[0] not in entities or k[1] not in entities[k[0]] or k[2] not in entities[k[0]]:
del goldoffsets[k]
print "excluded ", k
tps = set(offsets.keys()) & set(goldoffsets.keys())
fps = set(offsets.keys()) - set(goldoffsets.keys())
fns = set(goldoffsets.keys()) - set(offsets.keys())
fpreport, fpwords = get_report(fps, corpus, offsets, getwords=getwords)
fnreport, fnwords = get_report(fns, corpus, goldoffsets, getwords=getwords)
tpreport, tpwords = get_report(tps, corpus, offsets, getwords=getwords)
alldocs = set(fpreport.keys())
alldocs = alldocs.union(fnreport.keys())
alldocs = alldocs.union(tpreport.keys())
if getwords:
report.append("Common FPs")
fpcounter = collections.Counter(fpwords)
for w in fpcounter.most_common(10):
report.append(w[0] + ": " + str(w[1]))
report.append(">\n")
report.append("Common FNs")
fncounter = collections.Counter(fnwords)
for w in fncounter.most_common(10):
report.append(w[0] + ": " + str(w[1]))
report.append(">\n")
for d in list(alldocs):
report.append(d)
if d in tpreport:
for x in tpreport[d]:
report.append("TP:%s" % x)
if d in fpreport:
for x in fpreport[d]:
report.append("FP:%s" % x)
if d in fnreport:
for x in fnreport[d]:
report.append("FN:%s" % x)
return report, tps, fps, fns
def get_report(results, corpus, more_info, getwords=True):
"""
Get more information from results.
:return: Lines to write to a report file, and the words that appear in this set
"""
# TODO: use only offset tuples (did, start, end, text)
# logging.debug(more_info)
report = {}
words = []
for t in results:
if t[0] == "":
did = "0"
else:
did = t[0]
if t[0] != "" and t[0] not in corpus.documents:
logging.debug("this doc is not in the corpus! %s" % t[0])
# logging.info(corpus.documents.keys())
continue
start, end = t[1], t[2]
if getwords:
# doctext = corpus.documents[x[0]].text
# if stype == "T":
# tokentext = corpus.documents[x[0]].title[start:end]
# else:
# tokentext = doctext[start:end]
tokentext = t[3]
words.append(tokentext)
if did not in report:
report[did] = []
if getwords:
# line = u"{}\t{}:{}\t{}\t{}".format(did, start, end, tokentext.encode('utf-8'), "\t".join(more_info[t]))
line = u"{}\t{}:{}\t{}\t{}".format(did, start, end, tokentext, "\t".join([str(s) for s in more_info[t]]))
else:
line = did + '\t' + start + ":" + end
report[did].append(line)
for d in report:
report[d].sort()
return report, words
def get_list_results(results, models, goldset, ths, rules, mode="ner"):
"""
Write results files considering only doc-level unique entities, as well as a report file with basic stats
:param results: ResultsNER object
:param models: Base model path
:param goldset: Set with gold standard annotations
:param ths: Validation thresholds
:param rules: Validation rules
"""
sysresults = results.corpus.get_unique_results(models, ths, rules, mode)
print "{} unique entries".format(len(sysresults))
print "saving results to {}".format(results.path + "_final.tsv")
with codecs.open(results.path + "_final.tsv", 'w', 'utf-8') as outfile:
outfile.write('\n'.join(['\t'.join(x) for x in sysresults]))
print "getting corpus entities..."
entities = {}
for did in results.corpus.documents:
entities[did] = set()
for sentence in results.corpus.documents[did].sentences:
for s in sentence.entities.elist:
for e in sentence.entities.elist[s]:
entities[did].add(e.normalized)
if goldset:
#lineset = set([(l[0], "0", "0", sysresults[l][-1]) for l in sysresults])
#goldset = set([(g[0], g[1].lower(), g[2].lower()) for g in goldset])
reportlines, tps, fps, fns = compare_results(sysresults, goldset, results.corpus, getwords=True, entities=entities)
with codecs.open(results.path + "_report.txt", 'w', "utf-8") as reportfile:
reportfile.write("TPs: {!s}\nFPs: {!s}\n FNs: {!s}\n".format(len(tps), len(fps), len(fns)))
if len(tps) == 0:
precision = 0
recall = 0
fmeasure = 0
else:
precision = len(tps)/(len(tps) + len(fps))
recall = len(tps)/(len(tps) + len(fns))
fmeasure = (2*precision*recall)/(precision + recall)
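# Worked example: 40 TPs, 10 FPs and 10 FNs give precision = recall = 40/50 = 0.8
# and F-measure = 2*0.8*0.8/(0.8+0.8) = 0.8.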
reportfile.write("Precision: {!s}\nRecall: {!s}\n".format(precision, recall))
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "f-measure: {}".format(fmeasure)
for line in reportlines:
reportfile.write(line + '\n')
else:
print "no gold set"
def get_relations_results(results, model, gold_pairs, ths, rules, compare_text=True):
system_pairs = {}
pcount = 0
ptrue = 0
npairs = 0
for did in results.document_pairs:
# npairs += len(results.document_pairs[did].pairs)
for p in results.document_pairs[did].pairs:
pcount += 1
if p.recognized_by.get(model) > 0:
val = p.validate()
if val:
ptrue += 1
pair = (did, (p.entities[0].dstart, p.entities[0].dend), (p.entities[1].dstart, p.entities[1].dend),
#"u"{}={}>{}".format(p.entities[0].text, p.relation, p.entities[1].text))
"")
#system_pairs.append(pair)
between_text = results.corpus.documents[p.entities[0].did].text[p.entities[0].dend:p.entities[1].dstart]
system_pairs[pair] = [u"{}=>{}".format(p.entities[0].type, p.entities[1].type), between_text]
# print random.sample(system_pairs, 5)
# print random.sample(gold_pairs, 5)
# print pcount, ptrue, npairs
if not compare_text:
gold_pairs = [(o[0], o[1], o[2], "") for o in gold_pairs]
reportlines, tps, fps, fns = compare_results(system_pairs, gold_pairs, results.corpus, getwords=compare_text)
with codecs.open(results.path + "_report.txt", 'w', "utf-8") as reportfile:
print "writing report to {}_report.txt".format(results.path)
reportfile.write("TPs: {!s}\nFPs: {!s}\nFNs: {!s}\n".format(len(tps), len(fps), len(fns)))
reportfile.write(">\n")
if len(tps) == 0:
precision, recall, fmeasure = 0, 0, 0
else:
precision, recall = len(tps)/(len(tps) + len(fps)), len(tps)/(len(tps) + len(fns))
fmeasure = 2*precision*recall/(precision+recall)
reportfile.write("Precision: {!s}\nRecall: {!s}\n".format(precision, recall))
reportfile.write(">\n")
for line in reportlines:
reportfile.write(line + '\n')
print "Precision: {:.3f}".format(precision)
print "Recall: {:.3f}".format(recall)
print "Fmeasure: {:.3f}".format(fmeasure)
return precision, recall
def run_anafora(results, models, annotations_path, text_path, ths, rules, etype=""):
if not os.path.exists(results.path + "/files/"):
os.makedirs(results.path + "/files/")
print "writing thyme results to ", results.path + "/files/"
write_tempeval_results(results, models, ths, rules)
r = run_anafora_evaluation(annotations_path, results.path, doctype="all", etype=etype)
print r
def get_results(results, models, gold_offsets, ths, rules, compare_text=True):
"""
Write a report file with basic stats
:param results: ResultsNER object
:param models: Base model path
:param goldset: Set with gold standard annotations
:param ths: Validation thresholds
:param rules: Validation rules
"""
logging.info("getting results...")
offsets = results.corpus.get_entity_offsets(models, ths, rules)
logging.info("done")
for o in offsets:
if o[0] not in results.corpus.documents:
print "DID not found! {}".format(o[0])
sys.exit()
#if not compare_text: #e.g. gold standard does not include the original text
# offsets = [(o[0], o[1], o[2], "") for o in offsets]
#logging.info("system entities: {}; gold entities: {}".format(offsets, gold_offsets))
reportlines, tps, fps, fns = compare_results(offsets, gold_offsets, results.corpus, getwords=compare_text)
with codecs.open(results.path + "_report.txt", 'w', "utf-8") as reportfile:
print "writing report to {}_report.txt".format(results.path)
reportfile.write("TPs: {!s}\nFPs: {!s}\nFNs: {!s}\n".format(len(tps), len(fps), len(fns)))
reportfile.write(">\n")
if len(tps) == 0:
precision = 0
recall = 0
fmeasure = 0
else:
precision = len(tps)/(len(tps) + len(fps))
recall = len(tps)/(len(tps) + len(fns))
fmeasure = 2 * precision * recall / (precision + recall)
reportfile.write("Precision: {!s}\nRecall: {!s}\n".format(precision, recall))
reportfile.write(">\n")
for line in reportlines:
reportfile.write(line + '\n')
print "Precision: {:.3f}".format(precision)
print "Recall: {:.3f}".format(recall)
print "Fmeasure: {:.3f}".format(fmeasure)
return precision, recall
def main():
start_time = time.time()
parser = argparse.ArgumentParser(description='')
parser.add_argument("action", default="evaluate",
help="Actions to be performed.")
parser.add_argument("goldstd", default="chemdner_sample",
help="Gold standard to be used.",
choices=paths.keys())
parser.add_argument("--corpus", dest="corpus",
default="data/chemdner_sample_abstracts.txt.pickle",
help="format path")
parser.add_argument("--results", dest="results", help="Results object pickle.", nargs='+')
parser.add_argument("--models", dest="models", help="model destination path, without extension", default="combined")
parser.add_argument("--ensemble", dest="ensemble", help="name/path of ensemble classifier", default="combined")
parser.add_argument("--chebi", dest="chebi", help="Chebi mapping threshold.", default=0, type=float)
parser.add_argument("--ssm", dest="ssm", help="SSM threshold.", default=0, type=float)
parser.add_argument("--measure", dest="measure", help="semantic similarity measure", default="simui")
parser.add_argument("--log", action="store", dest="loglevel", default="WARNING", help="Log level")
parser.add_argument("--submodels", default="", nargs='+', help="sub types of classifiers"),
parser.add_argument("--rules", default=[], nargs='+', help="aditional post processing rules")
parser.add_argument("--features", default=["chebi", "case", "number", "greek", "dashes", "commas", "length", "chemwords", "bow"],
nargs='+', help="additional features for ensemble classifier")
parser.add_argument("--doctype", dest="doctype", help="type of document to be considered", default="all")
parser.add_argument("--entitytype", dest="etype", help="type of entities to be considered", default="all")
parser.add_argument("--pairtype", dest="ptype", help="type of pairs to be considered", default=None)
parser.add_argument("--external", action="store_true", default=False, help="Run external evaluation script, depends on corpus type")
parser.add_argument("--output", dest="output", help="Final output", default=None)
options = parser.parse_args()
numeric_level = getattr(logging, options.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.loglevel)
while len(logging.root.handlers) > 0:
logging.root.removeHandler(logging.root.handlers[-1])
logging_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s:%(funcName)s %(message)s'
logging.basicConfig(level=numeric_level, format=logging_format)
logging.getLogger().setLevel(numeric_level)
logging.info("Processing action {0} on {1}".format(options.action, options.goldstd))
results_list = []
for results_path in options.results:
logging.info("loading results %s" % results_path + ".pickle")
if os.path.exists(results_path + ".pickle"):
results = pickle.load(open(results_path + ".pickle", 'rb'))
results.load_corpus(options.goldstd)
results.path = results_path
results_list.append(results)
else:
print "results not found"
print results_path
sys.exit()
if options.action in ("combine", "train_ensemble", "test_ensemble", "savetocorpus"):
# merge the results of various results corresponding to different classifiers
# the entities of each sentence are added according to the classifier of each result
# every result should correspond to the same gold standard
# save to the first results path
#results.load_corpus(options.goldstd)
#logging.info("combining results...")
#results.combine_results(options.models, options.models + "_combined")
#results.save(options.results + "_combined.pickle")
base_result = results_list[0]
for result in results_list[1:]:
logging.info("adding {}...".format(result.path))
base_result.add_results(result)
if options.action == "combine":
base_result.combine_results(options.etype, options.models)
n_sentences, n_docs, n_entities, n_relations = 0, 0, 0, 0
for did in base_result.corpus.documents:
n_docs += 1
for sentence in base_result.corpus.documents[did].sentences:
n_sentences += 1
for e in sentence.entities.elist[options.models]:
n_entities += 1
logging.info("Combined {} docs, {} sentences, {} entities".format(n_docs, n_sentences, n_entities))
base_result.save(options.models + ".pickle")
elif options.action == "savetocorpus":
base_result.corpus.save(options.output + ".pickle")
elif options.action == "train_ensemble":
pipeline = Pipeline(
[
#('clf', SGDClassifier(loss='hinge', penalty='l1', alpha=0.0001, n_iter=5, random_state=42)),
#('clf', SGDClassifier())
#('clf', svm.NuSVC(nu=0.01 ))
# ('clf', RandomForestClassifier(class_weight={False:1, True:1}, n_jobs=-1, criterion="entropy", warm_start=True))
#('clf', tree.DecisionTreeClassifier(criterion="entropy")),
#('clf', MultinomialNB())
#('clf', GaussianNB())
('clf', svm.SVC(kernel="rbf", degree=2, C=1))
#('clf', DummyClassifier(strategy="constant", constant=True))
])
print pipeline
base_result.train_ensemble(pipeline, options.models, options.etype)
elif options.action == "test_ensemble":
pipeline = joblib.load("{}/{}/{}.pkl".format("models/ensemble/", options.models, options.models))
print pipeline
base_result.test_ensemble(pipeline, options.models, options.etype)
base_result.save("results/" + options.models + ".pickle")
elif options.action in ("evaluate", "evaluate_list", "count_entities"):
counts = {}
if options.action == "count_entities":
for did in results_list[0].corpus.documents:
for sentence in results_list[0].corpus.documents[did].sentences:
print sentence.entities.elist.keys()
if options.models in sentence.entities.elist:
for e in sentence.entities.elist[options.models]:
if e.type not in counts:
counts[e.type] = 0
counts[e.type] += 1
print counts
sys.exit()
if paths[options.goldstd].get("annotations"):
logging.info("loading gold standard %s" % paths[options.goldstd]["annotations"])
goldset = get_gold_ann_set(paths[options.goldstd]["format"], paths[options.goldstd]["annotations"],
options.etype, options.ptype, paths[options.goldstd]["text"])
else:
goldset = ({}, {})
logging.info("using thresholds: chebi > {!s} ssm > {!s}".format(options.chebi, options.ssm))
#results.load_corpus(options.goldstd)
#results.path = options.results
ths = {"chebi": options.chebi}
if options.ssm > 0:
ths["ssm"] = options.ssm
if options.action == "evaluate":
for result in results_list:
if options.ptype: # evaluate this pair type
get_relations_results(result, options.models, goldset[1], ths, options.rules)
else: # evaluate an entity type
get_results(result, options.models, goldset[0], ths, options.rules)
if options.external:
write_chemdner_files(results, options.models, goldset, ths, options.rules)
#evaluation = run_chemdner_evaluation(paths[options.goldstd]["cem"],
# options.results[0] + ".tsv")
#print evaluation
elif options.action == "evaluate_list": # ignore the spans, the gold standard is a list of unique entities
for result in results_list:
if options.ptype:
get_list_results(result, options.models, goldset[1], ths, options.rules, mode="re")
else:
get_list_results(result, options.models, goldset[0], ths, options.rules)
elif options.action == "anafora":
for result in results_list:
run_anafora(result, options.models, paths[options.goldstd]["annotations"], paths[options.goldstd]["text"], {},
options.rules, options.etype)
total_time = time.time() - start_time
logging.info("Total time: %ss" % total_time)
if __name__ == "__main__":
main()
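# Hedged invocation example (the script name and result path are assumptions;
# "chemdner_sample" is simply the argparse default above):
#
#   python evaluate.py evaluate chemdner_sample --results results/run1 --models combined
#
# This loads results/run1.pickle, compares it with the configured gold standard
# and writes results/run1_report.txt with precision, recall and F-measure.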
|
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
from twisted.cred import portal
from twisted.python import components, log
from twisted.internet.process import ProcessExitedAlready
from zope import interface
from ssh import session, forwarding, filetransfer
from ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, FXF_CREAT, FXF_TRUNC, FXF_EXCL
from twisted.conch.ls import lsLine
from avatar import ConchUser
from error import ConchError
from interfaces import ISession, ISFTPServer, ISFTPFile
import struct, os, time, socket
import fcntl, tty
import pwd, grp
import pty
import ttymodes
try:
import utmp
except ImportError:
utmp = None
class UnixSSHRealm:
interface.implements(portal.IRealm)
def requestAvatar(self, username, mind, *interfaces):
user = UnixConchUser(username)
return interfaces[0], user, user.logout
class UnixConchUser(ConchUser):
def __init__(self, username):
ConchUser.__init__(self)
self.username = username
self.pwdData = pwd.getpwnam(self.username)
l = [self.pwdData[3]]
for groupname, password, gid, userlist in grp.getgrall():
if username in userlist:
l.append(gid)
self.otherGroups = l
self.listeners = {} # dict mapping (interface, port) -> listener
self.channelLookup.update(
{"session": session.SSHSession,
"direct-tcpip": forwarding.openConnectForwardingClient})
self.subsystemLookup.update(
{"sftp": filetransfer.FileTransferServer})
def getUserGroupId(self):
return self.pwdData[2:4]
def getOtherGroups(self):
return self.otherGroups
def getHomeDir(self):
return self.pwdData[5]
def getShell(self):
return self.pwdData[6]
def global_tcpip_forward(self, data):
hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
from twisted.internet import reactor
try: listener = self._runAsUser(
reactor.listenTCP, portToBind,
forwarding.SSHListenForwardingFactory(self.conn,
(hostToBind, portToBind),
forwarding.SSHListenServerForwardingChannel),
interface = hostToBind)
except:
return 0
else:
self.listeners[(hostToBind, portToBind)] = listener
if portToBind == 0:
portToBind = listener.getHost()[2] # the port
return 1, struct.pack('>L', portToBind)
else:
return 1
def global_cancel_tcpip_forward(self, data):
hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
listener = self.listeners.get((hostToBind, portToBind), None)
if not listener:
return 0
del self.listeners[(hostToBind, portToBind)]
self._runAsUser(listener.stopListening)
return 1
def logout(self):
# remove all listeners
for listener in self.listeners.itervalues():
self._runAsUser(listener.stopListening)
log.msg('avatar %s logging out (%i)' % (self.username, len(self.listeners)))
def _runAsUser(self, f, *args, **kw):
euid = os.geteuid()
egid = os.getegid()
groups = os.getgroups()
uid, gid = self.getUserGroupId()
os.setegid(0)
os.seteuid(0)
os.setgroups(self.getOtherGroups())
os.setegid(gid)
os.seteuid(uid)
try:
f = iter(f)
except TypeError:
f = [(f, args, kw)]
try:
for i in f:
func = i[0]
args = len(i)>1 and i[1] or ()
kw = len(i)>2 and i[2] or {}
r = func(*args, **kw)
finally:
os.setegid(0)
os.seteuid(0)
os.setgroups(groups)
os.setegid(egid)
os.seteuid(euid)
return r
class SSHSessionForUnixConchUser:
interface.implements(ISession)
def __init__(self, avatar):
self.avatar = avatar
self.environ = {'PATH': '/bin:/usr/bin:/usr/local/bin'}
self.pty = None
self.ptyTuple = 0
def addUTMPEntry(self, loggedIn=1):
if not utmp:
return
ipAddress = self.avatar.conn.transport.transport.getPeer().host
packedIp, = struct.unpack('L', socket.inet_aton(ipAddress))
ttyName = self.ptyTuple[2][5:]
t = time.time()
t1 = int(t)
t2 = int((t-t1) * 1e6)
entry = utmp.UtmpEntry()
entry.ut_type = loggedIn and utmp.USER_PROCESS or utmp.DEAD_PROCESS
entry.ut_pid = self.pty.pid
entry.ut_line = ttyName
entry.ut_id = ttyName[-4:]
entry.ut_tv = (t1,t2)
if loggedIn:
entry.ut_user = self.avatar.username
entry.ut_host = socket.gethostbyaddr(ipAddress)[0]
entry.ut_addr_v6 = (packedIp, 0, 0, 0)
a = utmp.UtmpRecord(utmp.UTMP_FILE)
a.pututline(entry)
a.endutent()
b = utmp.UtmpRecord(utmp.WTMP_FILE)
b.pututline(entry)
b.endutent()
def getPty(self, term, windowSize, modes):
self.environ['TERM'] = term
self.winSize = windowSize
self.modes = modes
master, slave = pty.openpty()
ttyname = os.ttyname(slave)
self.environ['SSH_TTY'] = ttyname
self.ptyTuple = (master, slave, ttyname)
def openShell(self, proto):
from twisted.internet import reactor
if not self.ptyTuple: # we didn't get a pty-req
log.msg('tried to get shell without pty, failing')
raise ConchError("no pty")
uid, gid = self.avatar.getUserGroupId()
homeDir = self.avatar.getHomeDir()
shell = self.avatar.getShell()
self.environ['USER'] = self.avatar.username
self.environ['HOME'] = homeDir
self.environ['SHELL'] = shell
shellExec = os.path.basename(shell)
peer = self.avatar.conn.transport.transport.getPeer()
host = self.avatar.conn.transport.transport.getHost()
self.environ['SSH_CLIENT'] = '%s %s %s' % (peer.host, peer.port, host.port)
self.getPtyOwnership()
self.pty = reactor.spawnProcess(proto, \
shell, ['-%s' % shellExec], self.environ, homeDir, uid, gid,
usePTY = self.ptyTuple)
self.addUTMPEntry()
fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
struct.pack('4H', *self.winSize))
if self.modes:
self.setModes()
self.oldWrite = proto.transport.write
proto.transport.write = self._writeHack
self.avatar.conn.transport.transport.setTcpNoDelay(1)
def execCommand(self, proto, cmd):
from twisted.internet import reactor
uid, gid = self.avatar.getUserGroupId()
homeDir = self.avatar.getHomeDir()
shell = self.avatar.getShell() or '/bin/sh'
command = (shell, '-c', cmd)
peer = self.avatar.conn.transport.transport.getPeer()
host = self.avatar.conn.transport.transport.getHost()
self.environ['SSH_CLIENT'] = '%s %s %s' % (peer.host, peer.port, host.port)
if self.ptyTuple:
self.getPtyOwnership()
self.pty = reactor.spawnProcess(proto, \
shell, command, self.environ, homeDir,
uid, gid, usePTY = self.ptyTuple or 0)
if self.ptyTuple:
self.addUTMPEntry()
if self.modes:
self.setModes()
# else:
# tty.setraw(self.pty.pipes[0].fileno(), tty.TCSANOW)
self.avatar.conn.transport.transport.setTcpNoDelay(1)
def getPtyOwnership(self):
ttyGid = os.stat(self.ptyTuple[2])[5]
uid, gid = self.avatar.getUserGroupId()
euid, egid = os.geteuid(), os.getegid()
os.setegid(0)
os.seteuid(0)
try:
os.chown(self.ptyTuple[2], uid, ttyGid)
finally:
os.setegid(egid)
os.seteuid(euid)
def setModes(self):
pty = self.pty
attr = tty.tcgetattr(pty.fileno())
for mode, modeValue in self.modes:
if not ttymodes.TTYMODES.has_key(mode): continue
ttyMode = ttymodes.TTYMODES[mode]
if len(ttyMode) == 2: # flag
flag, ttyAttr = ttyMode
if not hasattr(tty, ttyAttr): continue
ttyval = getattr(tty, ttyAttr)
if modeValue:
attr[flag] = attr[flag]|ttyval
else:
attr[flag] = attr[flag]&~ttyval
elif ttyMode == 'OSPEED':
attr[tty.OSPEED] = getattr(tty, 'B%s'%modeValue)
elif ttyMode == 'ISPEED':
attr[tty.ISPEED] = getattr(tty, 'B%s'%modeValue)
else:
if not hasattr(tty, ttyMode): continue
ttyval = getattr(tty, ttyMode)
attr[tty.CC][ttyval] = chr(modeValue)
tty.tcsetattr(pty.fileno(), tty.TCSANOW, attr)
def eofReceived(self):
if self.pty:
self.pty.closeStdin()
def closed(self):
if self.ptyTuple and os.path.exists(self.ptyTuple[2]):
ttyGID = os.stat(self.ptyTuple[2])[5]
os.chown(self.ptyTuple[2], 0, ttyGID)
if self.pty:
try:
self.pty.signalProcess('HUP')
except (OSError,ProcessExitedAlready):
pass
self.pty.loseConnection()
self.addUTMPEntry(0)
log.msg('shell closed')
def windowChanged(self, winSize):
self.winSize = winSize
fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
struct.pack('4H', *self.winSize))
def _writeHack(self, data):
"""
Hack to send ignore messages when we aren't echoing.
"""
if self.pty is not None:
attr = tty.tcgetattr(self.pty.fileno())[3]
if not attr & tty.ECHO and attr & tty.ICANON: # no echo
self.avatar.conn.transport.sendIgnore('\x00'*(8+len(data)))
self.oldWrite(data)
class SFTPServerForUnixConchUser:
interface.implements(ISFTPServer)
def __init__(self, avatar):
self.avatar = avatar
def _setAttrs(self, path, attrs):
"""
NOTE: this function assumes it runs as the logged-in user:
i.e. under _runAsUser()
"""
if attrs.has_key("uid") and attrs.has_key("gid"):
os.chown(path, attrs["uid"], attrs["gid"])
if attrs.has_key("permissions"):
os.chmod(path, attrs["permissions"])
if attrs.has_key("atime") and attrs.has_key("mtime"):
os.utime(path, (attrs["atime"], attrs["mtime"]))
def _getAttrs(self, s):
return {
"size" : s.st_size,
"uid" : s.st_uid,
"gid" : s.st_gid,
"permissions" : s.st_mode,
"atime" : int(s.st_atime),
"mtime" : int(s.st_mtime)
}
def _absPath(self, path):
home = self.avatar.getHomeDir()
return os.path.abspath(os.path.join(home, path))
def gotVersion(self, otherVersion, extData):
return {}
def openFile(self, filename, flags, attrs):
return UnixSFTPFile(self, self._absPath(filename), flags, attrs)
def removeFile(self, filename):
filename = self._absPath(filename)
return self.avatar._runAsUser(os.remove, filename)
def renameFile(self, oldpath, newpath):
oldpath = self._absPath(oldpath)
newpath = self._absPath(newpath)
return self.avatar._runAsUser(os.rename, oldpath, newpath)
def makeDirectory(self, path, attrs):
path = self._absPath(path)
return self.avatar._runAsUser([(os.mkdir, (path,)),
(self._setAttrs, (path, attrs))])
def removeDirectory(self, path):
path = self._absPath(path)
self.avatar._runAsUser(os.rmdir, path)
def openDirectory(self, path):
return UnixSFTPDirectory(self, self._absPath(path))
def getAttrs(self, path, followLinks):
path = self._absPath(path)
if followLinks:
s = self.avatar._runAsUser(os.stat, path)
else:
s = self.avatar._runAsUser(os.lstat, path)
return self._getAttrs(s)
def setAttrs(self, path, attrs):
path = self._absPath(path)
self.avatar._runAsUser(self._setAttrs, path, attrs)
def readLink(self, path):
path = self._absPath(path)
return self.avatar._runAsUser(os.readlink, path)
def makeLink(self, linkPath, targetPath):
linkPath = self._absPath(linkPath)
targetPath = self._absPath(targetPath)
return self.avatar._runAsUser(os.symlink, targetPath, linkPath)
def realPath(self, path):
return os.path.realpath(self._absPath(path))
def extendedRequest(self, extName, extData):
raise NotImplementedError
class UnixSFTPFile:
interface.implements(ISFTPFile)
def __init__(self, server, filename, flags, attrs):
self.server = server
openFlags = 0
if flags & FXF_READ == FXF_READ and flags & FXF_WRITE == 0:
openFlags = os.O_RDONLY
if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == 0:
openFlags = os.O_WRONLY
if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == FXF_READ:
openFlags = os.O_RDWR
if flags & FXF_APPEND == FXF_APPEND:
openFlags |= os.O_APPEND
if flags & FXF_CREAT == FXF_CREAT:
openFlags |= os.O_CREAT
if flags & FXF_TRUNC == FXF_TRUNC:
openFlags |= os.O_TRUNC
if flags & FXF_EXCL == FXF_EXCL:
openFlags |= os.O_EXCL
if attrs.has_key("permissions"):
mode = attrs["permissions"]
del attrs["permissions"]
else:
mode = 0777
fd = server.avatar._runAsUser(os.open, filename, openFlags, mode)
if attrs:
server.avatar._runAsUser(server._setAttrs, filename, attrs)
self.fd = fd
def close(self):
return self.server.avatar._runAsUser(os.close, self.fd)
def readChunk(self, offset, length):
return self.server.avatar._runAsUser([ (os.lseek, (self.fd, offset, 0)),
(os.read, (self.fd, length)) ])
def writeChunk(self, offset, data):
return self.server.avatar._runAsUser([(os.lseek, (self.fd, offset, 0)),
(os.write, (self.fd, data))])
def getAttrs(self):
s = self.server.avatar._runAsUser(os.fstat, self.fd)
return self.server._getAttrs(s)
def setAttrs(self, attrs):
raise NotImplementedError
class UnixSFTPDirectory:
def __init__(self, server, directory):
self.server = server
self.files = server.avatar._runAsUser(os.listdir, directory)
self.dir = directory
def __iter__(self):
return self
def next(self):
try:
f = self.files.pop(0)
except IndexError:
raise StopIteration
else:
s = self.server.avatar._runAsUser(os.lstat, os.path.join(self.dir, f))
longname = lsLine(f, s)
attrs = self.server._getAttrs(s)
return (f, longname, attrs)
def close(self):
self.files = []
components.registerAdapter(SFTPServerForUnixConchUser, UnixConchUser, filetransfer.ISFTPServer)
components.registerAdapter(SSHSessionForUnixConchUser, UnixConchUser, session.ISession)
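# Hedged wiring sketch (not part of this module): a server typically hands a
# UnixSSHRealm to a twisted.cred Portal together with a credentials checker
# (the checker choice is an assumption):
#
#   from twisted.cred.portal import Portal
#   p = Portal(UnixSSHRealm())
#   # p.registerChecker(...)  # e.g. a UNIX password / SSH public-key checker
#
# The portal then produces UnixConchUser avatars; the registerAdapter() calls
# above supply their ISession and ISFTPServer implementations.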
|
|
#!/usr/bin/env python
#
# This code is a part of `ardrone_autopilot` project
# which is distributed under the MIT license.
# See `LICENSE` file for details.
#
"""UI node for visualizing the drone state
This node provides the QT user interface and keyboard input.
Inputs
------
* /ardrone/navdata -- information about the drone state.
* /ui/message -- messages stream.
All messages published to this stream will be displayed.
Messages formed like `name::message` will update information
on the left side of the screen (you should add all names to the `grid` list
in order to get those messages displayed).
Messages which don't match the above pattern will be shown on the right
side of the screen. They will be displayed for `message_display_time`
(5 seconds by default) and then removed from the screen.
* /in/image -- main picture stream.
Outputs
-------
We use `DroneController` class to emit user control information.
* /ardrone/land -- single `land` command
* /ardrone/takeoff -- single `takeoff` command
* /ardrone/reset -- single `reset` command
* /cmd_vel -- velocity control commands (send on each keypress)
Parameters
----------
* ~message_display_time = 5000 [uint] -- time after which
anonymous messages will be hidden away from the screen (in milliseconds).
* ~connection_check_period = 500 [uint] -- consider the drone offline if we
have had no messages for more than this time (in milliseconds).
* ~fps = 50 [uint] -- interface update rate.
* ~swap_red_blue = False [bool] -- set this to `True` if you need to swap
red and blue channels (if you have to enable this, check that other nodes
work fine with this stream; it's better to swap image color before
passing it to the system, not after).
"""
import re
from collections import deque
from threading import Lock
from PySide import QtCore, QtGui
import rospy
from std_msgs.msg import String, Empty
from sensor_msgs.msg import Image
from ardrone_autonomy.msg import Navdata
from utils.drone import DroneController
# Message groups
grid = [
'drone.status',
None,
'drone.battery',
None,
['tgt.x',
'tgt.y',
'tgt.z'],
]
class Messages(object):
def __init__(self, message_display_time, *args):
self.message_structure = args
self.messages_named = {}
self.messages_queue = deque()
self.message_display_time = message_display_time
self.lock = Lock()
def messages_put(self, messages):
"""Add new messages to UI"""
with self.lock:
for message, name in messages:
if name is None:
self.messages_queue.append((message, rospy.get_time()))
else:
self.messages_named[name] = message
def message_put(self, message, name=None):
"""Add one new message to UI"""
self.messages_put([(message, name), ])
def messages_flush(self):
"""Remove all messages"""
with self.lock:
self.messages_named = {}
self.messages_queue = deque()
def clean_queue(self):
"""Remove all outdated messages from the queue"""
with self.lock:
while (self.messages_queue and
rospy.get_time() - self.messages_queue[0][1] >
self.message_display_time / 1000):
self.messages_queue.popleft()
def render(self, image):
"""Print all messages onto the given image"""
self.clean_queue()
painter = QtGui.QPainter()
painter.begin(image)
painter.setPen(QtGui.QColor(255, 255, 255))
width, height = image.width(), image.height()
x, y = int(width * .05), int(height * .05)
column_width = 250
with self.lock:
for entry in self.message_structure:
if entry is None:
entry = []
if not isinstance(entry, (list, tuple)):
entry = [entry]
for name in entry:
value = self.messages_named.get(name, '-')
if value is None:
value = '-'
painter.drawText(x, y, '%s: %s' % (name, value))
y += 15
y += 5
if y > int(height * .9):
y = int(height * .05)
x += column_width
x, y = int(width - column_width), int(height * .05)
for text, time in self.messages_queue:
painter.drawText(x, y, text)
y += 15
if y > int(height * .9):
y = int(height * .05)
x -= column_width
painter.end()
class UInode(QtGui.QMainWindow):
def __init__(self):
super(UInode, self).__init__()
self._ap_topic = rospy.Publisher('/apctrl', Empty,
queue_size=5)
self.swap_red_blue = rospy.get_param('~swap_red_blue', False)
self.controller = DroneController(
offline_timeout=rospy.get_param('~connection_check_period', 500))
self.keymap = self.gen_keymap()
self.messages = Messages(
rospy.get_param('~message_display_time', 5000), *grid)
self.messages_named_template = re.compile(
r'((?P<name>[a-zA-Z0-9_-]+)::)?(?P<message>.*)')
self.setWindowTitle('ARdrone camera')
self.image_box = QtGui.QLabel(self)
self.setCentralWidget(self.image_box)
self.image = None
self.image_lock = Lock()
fps = rospy.get_param('~fps', 50)
self.redraw_timer = QtCore.QTimer(self)
self.redraw_timer.timeout.connect(self.on_redraw)
self.redraw_timer.start(1000 / fps)
rospy.Subscriber('/ui/message', String, self.on_ui_request)
rospy.Subscriber('/in/image', Image, self.on_video_update)
def on_ui_request(self, message):
"""Process the message show request
We have a special `ui/message` topic where any node can send
any message and that message will be displayed.
By default, messages are displayed for a while and then hidden.
Messages which match the mask `([a-zA-Z0-9_-])::(.*)` will be displayed
permanently. Newer messages will overwrite older messages
with the same name.
"""
match = self.messages_named_template.match(message.data)
self.messages.message_put(**match.groupdict())
def on_video_update(self, data):
"""On each frame we save new picture for future rendering"""
self.communication_since_timer = True
image = QtGui.QImage(data.data,
data.width,
data.height,
QtGui.QImage.Format_RGB888)
if self.swap_red_blue:
image = QtGui.QImage.rgbSwapped(image)
with self.image_lock:
self.image = image
def on_redraw(self):
"""Redraw interface"""
image = None
with self.image_lock:
if self.controller.is_online and self.image is not None:
image = QtGui.QPixmap.fromImage(self.image)
else:
image = QtGui.QPixmap(640, 360)
image.fill(QtGui.QColor(50, 50, 50))
self.messages.messages_put((
(self.controller.status.readable(), 'drone.status'),
(self.controller.battery, 'drone.battery'),
))
self.messages.render(image)
self.resize(image.width(), image.height())
self.image_box.setPixmap(image)
def gen_keymap(self):
return {
QtCore.Qt.Key.Key_R: lambda ax, e: self.controller.reset(),
QtCore.Qt.Key.Key_T: lambda ax, e: self.controller.takeoff(),
QtCore.Qt.Key.Key_L: lambda ax, e: self.controller.land(),
QtCore.Qt.Key.Key_H: lambda ax, e: self.controller.hover(),
QtCore.Qt.Key.Key_A: lambda ax, e: self.controller.send_vel(y=ax),
QtCore.Qt.Key.Key_D: lambda ax, e: self.controller.send_vel(y=-ax),
QtCore.Qt.Key.Key_W: lambda ax, e: self.controller.send_vel(x=ax),
QtCore.Qt.Key.Key_S: lambda ax, e: self.controller.send_vel(x=-ax),
QtCore.Qt.Key.Key_Q: lambda ax, e: self.controller.send_vel(yaw=ax),
QtCore.Qt.Key.Key_E: lambda ax, e: self.controller.send_vel(yaw=-ax),
QtCore.Qt.Key.Key_BracketRight: lambda ax, e: self.controller.send_vel(z=ax),
QtCore.Qt.Key.Key_BracketLeft: lambda ax, e: self.controller.send_vel(z=-ax),
QtCore.Qt.Key.Key_Y: lambda ax, e: self._ap_topic.publish(Empty()) if ax != 0 else None,
}
def keyPressEvent(self, event):
key = event.key()
if event.isAutoRepeat() or self.controller is None:
return
if key in self.keymap:
self.keymap[key](1, event)
def keyReleaseEvent(self, event):
key = event.key()
if event.isAutoRepeat() or self.controller is None:
return
if key in self.keymap:
self.keymap[key](0, event)
if __name__ == '__main__':
import sys
rospy.init_node('ui_node')
rospy.loginfo('Starting user interface')
app = QtGui.QApplication(sys.argv)
ui = UInode()
ui.show()
status = app.exec_()
sys.exit(status)
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally import exceptions as rally_exceptions
from rally.plugins.openstack.scenarios.nova import servers
from tests.unit import fakes
from tests.unit import test
NOVA_SERVERS_MODULE = "rally.plugins.openstack.scenarios.nova.servers"
NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers"
@ddt.ddt
class NovaServersTestCase(test.ScenarioTestCase):
def test_boot_rescue_unrescue(self):
actions = [{"rescue_unrescue": 5}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._rescue_server = mock.MagicMock()
scenario._unrescue_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
self.assertEqual(5, scenario._rescue_server.call_count,
"Rescue not called 5 times")
self.assertEqual(5, scenario._unrescue_server.call_count,
"Unrescue not called 5 times")
scenario._rescue_server.assert_has_calls(server_calls)
scenario._unrescue_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_stop_start(self):
actions = [{"stop_start": 5}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._start_server = mock.MagicMock()
scenario._stop_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
self.assertEqual(5, scenario._stop_server.call_count,
"Stop not called 5 times")
self.assertEqual(5, scenario._start_server.call_count,
"Start not called 5 times")
scenario._stop_server.assert_has_calls(server_calls)
scenario._start_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_multiple_bounce_actions(self):
actions = [{"hard_reboot": 5}, {"stop_start": 8}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._reboot_server = mock.MagicMock()
scenario._stop_and_start_server = mock.MagicMock()
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
self.assertEqual(5, scenario._reboot_server.call_count,
"Reboot not called 5 times")
scenario._reboot_server.assert_has_calls(server_calls)
server_calls = []
for i in range(8):
server_calls.append(mock.call(fake_server))
self.assertEqual(8, scenario._stop_and_start_server.call_count,
"Stop/Start not called 8 times")
scenario._stop_and_start_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_lock_unlock_and_delete(self):
server = fakes.FakeServer()
image = fakes.FakeImage()
flavor = fakes.FakeFlavor()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.Mock(return_value=server)
scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock())
scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock())
scenario._delete_server = mock.Mock(
side_effect=lambda s, **kwargs:
self.assertFalse(getattr(s, "OS-EXT-STS:locked", False)))
scenario.boot_lock_unlock_and_delete(image, flavor, fakearg="fakearg")
scenario._boot_server.assert_called_once_with(image, flavor,
fakearg="fakearg")
scenario._lock_server.assert_called_once_with(server)
scenario._unlock_server.assert_called_once_with(server)
scenario._delete_server.assert_called_once_with(server, force=False)
def test_validate_actions(self):
actions = [{"hardd_reboot": 6}]
scenario = servers.NovaServers(self.context)
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = [{"hard_reboot": "no"}]
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = {"hard_reboot": 6}
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = {"hard_reboot": -1}
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = {"hard_reboot": 0}
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
def _verify_reboot(self, soft=True):
actions = [{"soft_reboot" if soft else "hard_reboot": 5}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._reboot_server = mock.MagicMock()
scenario._soft_reboot_server = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
if soft:
self.assertEqual(5, scenario._soft_reboot_server.call_count,
"Reboot not called 5 times")
scenario._soft_reboot_server.assert_has_calls(server_calls)
else:
self.assertEqual(5, scenario._reboot_server.call_count,
"Reboot not called 5 times")
scenario._reboot_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_soft_reboot(self):
self._verify_reboot(soft=True)
def test_boot_hard_reboot(self):
self._verify_reboot(soft=False)
def test_boot_and_delete_server(self):
fake_server = object()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario.sleep_between = mock.MagicMock()
scenario.boot_and_delete_server("img", 0, 10, 20, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_and_delete_multiple_servers(self):
scenario = servers.NovaServers(self.context)
scenario._boot_servers = mock.Mock()
scenario._delete_servers = mock.Mock()
scenario.sleep_between = mock.Mock()
scenario.boot_and_delete_multiple_servers("img", "flavor", count=15,
min_sleep=10,
max_sleep=20,
fakearg="fakearg")
scenario._boot_servers.assert_called_once_with("img", "flavor", 1,
instances_amount=15,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_servers.assert_called_once_with(
scenario._boot_servers.return_value, force=False)
def test_boot_and_list_server(self):
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock()
scenario._list_servers = mock.MagicMock()
scenario.boot_and_list_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._list_servers.assert_called_once_with(True)
def test_suspend_and_resume_server(self):
fake_server = object()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._suspend_server = mock.MagicMock()
scenario._resume_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.suspend_and_resume_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._suspend_server.assert_called_once_with(fake_server)
scenario._resume_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_pause_and_unpause_server(self):
fake_server = object()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._pause_server = mock.MagicMock()
scenario._unpause_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.pause_and_unpause_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._pause_server.assert_called_once_with(fake_server)
scenario._unpause_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_shelve_and_unshelve_server(self):
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._shelve_server = mock.MagicMock()
scenario._unshelve_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.shelve_and_unshelve_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._shelve_server.assert_called_once_with(fake_server)
scenario._unshelve_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_list_servers(self):
scenario = servers.NovaServers(self.context)
scenario._list_servers = mock.MagicMock()
scenario.list_servers(True)
scenario._list_servers.assert_called_once_with(True)
def test_boot_server_from_volume_and_delete(self):
fake_server = object()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario.sleep_between = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
fake_volume = fakes.FakeVolumeManager().create()
fake_volume.id = "volume_id"
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.boot_server_from_volume_and_delete("img", 0, 5, 10, 20,
fakearg="f")
scenario._create_volume.assert_called_once_with(5, imageRef="img")
scenario._boot_server.assert_called_once_with(
"img", 0,
block_device_mapping={"vda": "volume_id:::1"},
fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def _prepare_boot(self, nic=None, assert_nic=False):
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.generate_random_name = mock.MagicMock(return_value="name")
kwargs = {"fakearg": "f"}
expected_kwargs = {"fakearg": "f"}
assert_nic = nic or assert_nic
if nic:
kwargs["nics"] = nic
if assert_nic:
self.clients("nova").networks.create("net-1")
expected_kwargs["nics"] = nic or [{"net-id": "net-2"}]
return scenario, kwargs, expected_kwargs
def _verify_boot_server(self, nic=None, assert_nic=False):
scenario, kwargs, expected_kwargs = self._prepare_boot(
nic=nic, assert_nic=assert_nic)
scenario.boot_server("img", 0, **kwargs)
scenario._boot_server.assert_called_once_with(
"img", 0, auto_assign_nic=False, **expected_kwargs)
def test_boot_server_no_nics(self):
self._verify_boot_server(nic=None, assert_nic=False)
def test_boot_server_with_nic(self):
self._verify_boot_server(nic=[{"net-id": "net-1"}], assert_nic=True)
def test_snapshot_server(self):
fake_server = object()
fake_image = fakes.FakeImageManager()._create()
fake_image.id = "image_id"
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._create_image = mock.MagicMock(return_value=fake_image)
scenario._delete_server = mock.MagicMock()
scenario._delete_image = mock.MagicMock()
scenario.snapshot_server("i", 0, fakearg=2)
scenario._boot_server.assert_has_calls([
mock.call("i", 0, fakearg=2),
mock.call("image_id", 0, fakearg=2)])
scenario._create_image.assert_called_once_with(fake_server)
scenario._delete_server.assert_has_calls([
mock.call(fake_server, force=False),
mock.call(fake_server, force=False)])
scenario._delete_image.assert_called_once_with(fake_image)
def _test_resize(self, confirm=False):
fake_server = object()
fake_image = fakes.FakeImageManager()._create()
fake_image.id = "image_id"
flavor = mock.MagicMock()
to_flavor = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._resize_confirm = mock.MagicMock()
scenario._resize_revert = mock.MagicMock()
scenario._resize = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
kwargs = {"confirm": confirm}
scenario.resize_server(fake_image, flavor, to_flavor, **kwargs)
scenario._resize.assert_called_once_with(fake_server, to_flavor)
if confirm:
scenario._resize_confirm.assert_called_once_with(fake_server)
else:
scenario._resize_revert.assert_called_once_with(fake_server)
def test_resize_with_confirm(self):
self._test_resize(confirm=True)
def test_resize_with_revert(self):
self._test_resize(confirm=False)
@ddt.data({"confirm": True, "do_delete": True},
{"confirm": False, "do_delete": True})
@ddt.unpack
def test_boot_server_attach_created_volume_and_resize(self, confirm=False,
do_delete=False):
fake_volume = mock.MagicMock()
fake_server = mock.MagicMock()
flavor = mock.MagicMock()
to_flavor = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._attach_volume = mock.MagicMock()
scenario._resize_confirm = mock.MagicMock()
scenario._resize_revert = mock.MagicMock()
scenario._resize = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._delete_volume = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.sleep_between = mock.MagicMock()
volume_size = 10
scenario.boot_server_attach_created_volume_and_resize(
"img", flavor, to_flavor, volume_size, min_sleep=10,
max_sleep=20, confirm=confirm, do_delete=do_delete)
scenario._boot_server.assert_called_once_with("img", flavor)
scenario._create_volume.assert_called_once_with(volume_size)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._resize.assert_called_once_with(fake_server, to_flavor)
if confirm:
scenario._resize_confirm.assert_called_once_with(fake_server)
else:
scenario._resize_revert.assert_called_once_with(fake_server)
if do_delete:
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_and_live_migrate_server(self):
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.sleep_between = mock.MagicMock()
scenario._find_host_to_migrate = mock.MagicMock(
return_value="host_name")
scenario._live_migrate = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.boot_and_live_migrate_server("img", 0, min_sleep=10,
max_sleep=20, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._find_host_to_migrate.assert_called_once_with(fake_server)
scenario._live_migrate.assert_called_once_with(fake_server,
"host_name",
False, False)
scenario._delete_server.assert_called_once_with(fake_server)
def test_boot_server_from_volume_and_live_migrate(self):
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.sleep_between = mock.MagicMock()
scenario._find_host_to_migrate = mock.MagicMock(
return_value="host_name")
scenario._live_migrate = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
fake_volume = fakes.FakeVolumeManager().create()
fake_volume.id = "volume_id"
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.boot_server_from_volume_and_live_migrate("img", 0, 5,
min_sleep=10,
max_sleep=20,
fakearg="f")
scenario._create_volume.assert_called_once_with(5, imageRef="img")
scenario._boot_server.assert_called_once_with(
"img", 0,
block_device_mapping={"vda": "volume_id:::1"},
fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._find_host_to_migrate.assert_called_once_with(fake_server)
scenario._live_migrate.assert_called_once_with(fake_server,
"host_name",
False, False)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_server_attach_created_volume_and_live_migrate(self):
fake_volume = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario.sleep_between = mock.MagicMock()
scenario._find_host_to_migrate = mock.MagicMock(
return_value="host_name")
scenario._live_migrate = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
image = "img"
flavor = "flavor"
size = 5
boot_kwargs = {"some_var": "asd"}
scenario.boot_server_attach_created_volume_and_live_migrate(
image, flavor, size, min_sleep=10, max_sleep=20,
boot_server_kwargs=boot_kwargs)
scenario._boot_server.assert_called_once_with(image, flavor,
**boot_kwargs)
scenario._create_volume.assert_called_once_with(size)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._live_migrate.assert_called_once_with(fake_server,
"host_name",
False, False)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_server.assert_called_once_with(fake_server)
def _test_boot_and_migrate_server(self, confirm=False):
fake_server = mock.MagicMock()
scenario = servers.NovaServers(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._stop_server = mock.MagicMock()
scenario._migrate = mock.MagicMock()
scenario._resize_confirm = mock.MagicMock()
scenario._resize_revert = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
kwargs = {"confirm": confirm}
scenario.boot_and_migrate_server("img", 0,
fakearg="fakearg", **kwargs)
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg",
confirm=confirm)
scenario._stop_server.assert_called_once_with(fake_server)
scenario._migrate.assert_called_once_with(fake_server)
if confirm:
scenario._resize_confirm.assert_called_once_with(fake_server,
status="SHUTOFF")
else:
scenario._resize_revert.assert_called_once_with(fake_server,
status="SHUTOFF")
scenario._delete_server.assert_called_once_with(fake_server)
def test_boot_and_migrate_server_with_confirm(self):
self._test_boot_and_migrate_server(confirm=True)
def test_boot_and_migrate_server_with_revert(self):
self._test_boot_and_migrate_server(confirm=False)
def test_boot_and_rebuild_server(self):
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.Mock()
scenario._rebuild_server = mock.Mock()
scenario._delete_server = mock.Mock()
from_image = "img1"
to_image = "img2"
flavor = "flavor"
scenario.boot_and_rebuild_server(from_image, to_image, flavor,
fakearg="fakearg")
scenario._boot_server.assert_called_once_with(from_image, flavor,
fakearg="fakearg")
server = scenario._boot_server.return_value
scenario._rebuild_server.assert_called_once_with(server, to_image)
scenario._delete_server.assert_called_once_with(server)
def test_boot_and_show_server(self):
server = fakes.FakeServer()
image = fakes.FakeImage()
flavor = fakes.FakeFlavor()
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=server)
scenario._show_server = mock.MagicMock()
scenario.boot_and_show_server(image, flavor, fakearg="fakearg")
scenario._boot_server.assert_called_once_with(image, flavor,
fakearg="fakearg")
scenario._show_server.assert_called_once_with(server)
@ddt.data({"length": None},
{"length": 10})
@ddt.unpack
def test_boot_and_get_console_server(self, length):
server = fakes.FakeServer()
image = fakes.FakeImage()
flavor = fakes.FakeFlavor()
kwargs = {"fakearg": "fakearg"}
scenario = servers.NovaServers(self.context)
scenario._boot_server = mock.MagicMock(return_value=server)
scenario._get_server_console_output = mock.MagicMock()
scenario.boot_and_get_console_output(image, flavor, length,
**kwargs)
scenario._boot_server.assert_called_once_with(image, flavor,
**kwargs)
scenario._get_server_console_output.assert_called_once_with(server,
length)
@mock.patch(NOVA_SERVERS_MODULE + ".network_wrapper.wrap")
def test_boot_and_associate_floating_ip(self, mock_wrap):
scenario = servers.NovaServers(self.context)
server = mock.Mock()
scenario._boot_server = mock.Mock(return_value=server)
scenario._associate_floating_ip = mock.Mock()
image = "img"
flavor = "flavor"
scenario.boot_and_associate_floating_ip(image, flavor,
fakearg="fakearg")
scenario._boot_server.assert_called_once_with(image, flavor,
fakearg="fakearg")
net_wrap = mock_wrap.return_value
net_wrap.create_floating_ip.assert_called_once_with(
tenant_id=server.tenant_id)
scenario._associate_floating_ip.assert_called_once_with(
server, net_wrap.create_floating_ip.return_value["ip"])
|
|
"""Module for random variables.
This module contains classes for random variables and exceptions.
Classes:
ParameterException: Exception for invalid parameters.
RandomVariable: Abstract class for random variables.
Discrete: Class for discrete random variables.
Gaussian: Class for Gaussian random variables.
"""
from abc import ABC, abstractmethod, abstractproperty, abstractclassmethod
import numpy as np
class ParameterException(Exception):
"""Exception for invalid parameters."""
pass
class RandomVariable(ABC):
"""Abstract base class for all random variables."""
@abstractclassmethod
def unity(cls, *args):
"""Initialize unit element of the random variable."""
@abstractproperty
def dim(self):
"""Dimension of the random variable."""
@abstractmethod
def __str__(self):
"""String representation of the random variable."""
@abstractmethod
    def __add__(self, other):
        """Addition of two random variables."""
    @abstractmethod
    def __sub__(self, other):
        """Subtraction of two random variables."""
    @abstractmethod
    def __mul__(self, other):
        """Multiplication of two random variables."""
    @abstractmethod
    def __iadd__(self, other):
        """Augmented addition of two random variables."""
    @abstractmethod
    def __isub__(self, other):
        """Augmented subtraction of two random variables."""
    @abstractmethod
    def __imul__(self, other):
        """Augmented multiplication of two random variables."""
    @abstractmethod
    def __eq__(self, other):
"""Compare two random variables for equality."""
@abstractmethod
def normalize(self):
"""Normalize the random variable."""
@abstractmethod
def marginalize(self):
"""Return marginal of the random variable."""
@abstractmethod
def maximize(self):
"""Return maximum of the random variable."""
@abstractmethod
def argmax(self):
"""Return dimension of the maximum of the random variable."""
@abstractmethod
def log(self):
"""Return natural logarithm of the random variable."""
class Discrete(RandomVariable):
"""Class for discrete random variables.
A discrete random variable is defined by a single- or multi-dimensional
probability mass function. In addition, each dimension of the probability
mass function has to be associated with a variable. The variable is
represented by a variable node of the comprehensive factor graph.
"""
def __init__(self, raw_pmf, *args):
"""Initialize a discrete random variable.
Create a new discrete random variable with the given probability
mass function over the given variable nodes.
Args:
raw_pmf: A Numpy array representing the probability mass function.
The probability mass function does not need to be normalized.
*args: Instances of the class VNode representing the variables of
the probability mass function. The number of the positional
arguments must match the number of dimensions of the Numpy
array.
Raises:
ParameterException: An error occurred initializing with invalid
parameters.
"""
pmf = np.asarray(raw_pmf, dtype=np.float64)
# Set probability mass function
self._pmf = pmf
# Set variable nodes for dimensions
if np.ndim(pmf) != len(args):
raise ParameterException('Dimension mismatch.')
else:
self._dim = args
@classmethod
def unity(cls, *args):
"""Initialize unit element of a discrete random variable.
Args:
*args: Instances of the class VNode representing the variables of
the probability mass function. The number of the positional
arguments must match the number of dimensions of the Numpy
array.
Raises:
ParameterException: An error occurred initializing with invalid
parameters.
"""
n = len(args)
return cls(np.ones((1,) * n), *args)
@property
def pmf(self):
return self._pmf
@property
def dim(self):
return self._dim
def __str__(self):
"""Return string representation of the discrete random variable."""
return np.array2string(self.pmf, separator=',', sign=' ')
def __add__(self, other):
"""Add other to self and return the result.
Args:
other: Summand for the discrete random variable.
Returns:
A new discrete random variable representing the summation.
"""
        # Verify dimensions of both summands.
if len(self.dim) < len(other.dim):
self._expand(other.dim, other.pmf.shape)
elif len(self.dim) > len(other.dim):
other._expand(self.dim, self.pmf.shape)
pmf = self.pmf + other.pmf
return Discrete(pmf, *self.dim)
def __sub__(self, other):
"""Subtract other from self and return the result.
Args:
other: Subtrahend for the discrete random variable.
Returns:
A new discrete random variable representing the subtraction.
"""
# Verify dimensions of minuend and subtrahend.
if len(self.dim) < len(other.dim):
self._expand(other.dim, other.pmf.shape)
elif len(self.dim) > len(other.dim):
other._expand(self.dim, self.pmf.shape)
pmf = self.pmf - other.pmf
return Discrete(pmf, *self.dim)
def __mul__(self, other):
"""Multiply other with self and return the result.
Args:
other: Multiplier for the discrete random variable.
Returns:
A new discrete random variable representing the multiplication.
"""
# Verify dimensions of multiplicand and multiplier.
if len(self.dim) < len(other.dim):
self._expand(other.dim, other.pmf.shape)
elif len(self.dim) > len(other.dim):
other._expand(self.dim, self.pmf.shape)
pmf = self.pmf * other.pmf
return Discrete(pmf, *self.dim)
def __iadd__(self, other):
"""Method for augmented addition.
Args:
other: Summand for the discrete random variable.
Returns:
A new discrete random variable representing the summation.
"""
return self.__add__(other)
def __isub__(self, other):
"""Method for augmented subtraction.
Args:
other: Subtrahend for the discrete random variable.
Returns:
A new discrete random variable representing the subtraction.
"""
return self.__sub__(other)
def __imul__(self, other):
"""Method for augmented multiplication.
Args:
other: Multiplier for the discrete random variable.
Returns:
A new discrete random variable representing the multiplication.
"""
return self.__mul__(other)
def __eq__(self, other):
"""Compare self with other and return the boolean result.
        Two discrete random variables are equal only if their probability mass
        functions are equal and their dimensions are in the same order.
"""
return np.allclose(self.pmf, other.pmf) \
and self.dim == other.dim
def _expand(self, dims, states):
"""Expand dimensions.
Expand the discrete random variable along the given new dimensions.
Args:
            dims: Variable nodes spanning the target dimensions.
            states: Shape of the probability mass function associated with
                dims, used to repeat the newly added dimensions.
        """
reps = [1, ] * len(dims)
# Extract missing dimensions
diff = [i for i, d in enumerate(dims) if d not in self.dim]
# Expand missing dimensions
for d in diff:
self._pmf = np.expand_dims(self.pmf, axis=d)
reps[d] = states[d]
# Repeat missing dimensions
self._pmf = np.tile(self.pmf, reps)
self._dim = dims
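        # Illustrative sketch (shapes are assumed): if self.pmf covers only x
        # with shape (3,) and dims == (x, y) with states == (3, 4), the pmf is
        # expanded to (3, 1) and tiled to (3, 4) so that both operands of
        # __add__/__sub__/__mul__ share the same dimensions.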
def normalize(self):
"""Normalize probability mass function."""
pmf = self.pmf / np.abs(np.sum(self.pmf))
return Discrete(pmf, *self.dim)
def marginalize(self, *dims, normalize=True):
"""Return the marginal for given dimensions.
The probability mass function of the discrete random variable
is marginalized along the given dimensions.
Args:
*dims: Instances of discrete random variables, which should be
marginalized out.
normalize: Boolean flag if probability mass function should be
normalized after marginalization.
Returns:
A new discrete random variable representing the marginal.
"""
axis = tuple(idx for idx, d in enumerate(self.dim) if d in dims)
pmf = np.sum(self.pmf, axis)
if normalize:
pmf /= np.sum(pmf)
new_dims = tuple(d for d in self.dim if d not in dims)
return Discrete(pmf, *new_dims)
def maximize(self, *dims, normalize=True):
"""Return the maximum for given dimensions.
The probability mass function of the discrete random variable
is maximized along the given dimensions.
Args:
*dims: Instances of discrete random variables, which should be
maximized out.
normalize: Boolean flag if probability mass function should be
                normalized after maximization.
Returns:
A new discrete random variable representing the maximum.
"""
axis = tuple(idx for idx, d in enumerate(self.dim) if d in dims)
pmf = np.amax(self.pmf, axis)
if normalize:
pmf /= np.sum(pmf)
new_dims = tuple(d for d in self.dim if d not in dims)
return Discrete(pmf, *new_dims)
def argmax(self, dim=None):
"""Return the dimension index of the maximum.
Args:
            dim: An optional discrete random variable along which a
                marginalization is performed first; the maximum is then
                searched over the remaining dimensions. If None, the maximum
                is searched along all dimensions.
Returns:
An integer representing the dimension of the maximum.
"""
if dim is None:
return np.unravel_index(self.pmf.argmax(), self.pmf.shape)
m = self.marginalize(dim)
return np.argmax(m.pmf)
def log(self):
"""Natural logarithm of the discrete random variable.
Returns:
A new discrete random variable with the natural logarithm of the
            probability mass function.
"""
return Discrete(np.log(self.pmf), *self.dim)
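# Minimal usage sketch for Discrete (x and y are assumed to be VNode instances
# from the accompanying factor-graph module; numbers are illustrative):
#
#   joint = Discrete(np.array([[0.1, 0.3], [0.2, 0.4]]), x, y)
#   marg = joint.marginalize(y)   # pmf over x only, renormalized
#   idx = joint.argmax()          # index of the joint maximum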
class Gaussian(RandomVariable):
"""Class for Gaussian random variables.
A Gaussian random variable is defined by a mean vector and a covariance
matrix. In addition, each dimension of the mean vector and the covariance
matrix has to be associated with a variable. The variable is
represented by a variable node of the comprehensive factor graph.
"""
def __init__(self, raw_mean, raw_cov, *args):
"""Initialize a Gaussian random variable.
Create a new Gaussian random variable with the given mean vector and
the given covariance matrix over the given variable nodes.
Args:
raw_mean: A Numpy array representing the mean vector.
raw_cov: A Numpy array representing the covariance matrix.
*args: Instances of the class VNode representing the variables of
the mean vector and covariance matrix, respectively. The number
of the positional arguments must match the number of dimensions
of the Numpy arrays.
Raises:
ParameterException: An error occurred initializing with invalid
parameters.
"""
if raw_mean is not None and raw_cov is not None:
mean = np.asarray(raw_mean, dtype=np.float64)
cov = np.asarray(raw_cov, dtype=np.float64)
# Set mean vector and covariance matrix
if mean.shape[0] != cov.shape[0]:
raise ParameterException('Dimension mismatch.')
else:
# Precision matrix
self._W = np.linalg.inv(np.asarray(cov))
# Precision-mean vector
self._Wm = np.dot(self._W, np.asarray(mean))
# Set variable nodes for dimensions
if cov.shape[0] != len(args):
raise ParameterException('Dimension mismatch.')
else:
self._dim = args
else:
self._dim = args
@classmethod
def unity(cls, *args):
"""Initialize unit element of a Gaussian random variable.
Args:
*args: Instances of the class VNode representing the variables of
the mean vector and covariance matrix, respectively. The number
of the positional arguments must match the number of dimensions
of the Numpy arrays.
Raises:
ParameterException: An error occurred initializing with invalid
parameters.
"""
n = len(args)
return cls(np.diag(np.zeros(n)), np.diag(np.ones(n) * np.Inf), *args)
@classmethod
def inf_form(cls, raw_W, raw_Wm, *args):
"""Initialize a Gaussian random variable using the information form.
Create a new Gaussian random variable with the given mean vector and
the given covariance matrix over the given variable nodes.
Args:
raw_W: A Numpy array representing the precision matrix.
raw_Wm: A Numpy array representing the precision-mean vector.
*args: Instances of the class VNode representing the variables of
the mean vector and covariance matrix, respectively. The number
of the positional arguments must match the number of dimensions
of the Numpy arrays.
Raises:
ParameterException: An error occurred initializing with invalid
parameters.
"""
g = cls(None, None, *args)
g._W = np.asarray(raw_W, dtype=np.float64)
g._Wm = np.asarray(raw_Wm, dtype=np.float64)
return g
@property
def mean(self):
return np.dot(np.linalg.inv(self._W), self._Wm)
@property
def cov(self):
return np.linalg.inv(self._W)
@property
def dim(self):
return self._dim
def __str__(self):
"""Return string representation of the Gaussian random variable."""
mean = np.array2string(self.mean, separator=',', sign=' ')
cov = np.array2string(self.cov, separator=',', sign=' ')
return "%s\n%s" % (mean, cov)
def __add__(self, other):
"""Add other to self and return the result.
Args:
other: Summand for the Gaussian random variable.
Returns:
A new Gaussian random variable representing the summation.
"""
return Gaussian(self.mean + other.mean,
self.cov + other.cov,
*self.dim)
def __sub__(self, other):
"""Subtract other from self and return the result.
Args:
            other: Subtrahend for the Gaussian random variable.
Returns:
A new Gaussian random variable representing the subtraction.
"""
return Gaussian(self.mean - other.mean,
self.cov - other.cov,
*self.dim)
def __mul__(self, other):
"""Multiply other with self and return the result.
Args:
other: Multiplier for the Gaussian random variable.
Returns:
A new Gaussian random variable representing the multiplication.
"""
W = self._W + other._W
Wm = self._Wm + other._Wm
return Gaussian.inf_form(W, Wm, *self.dim)
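        # The product of two Gaussian densities adds precision matrices and
        # precision-mean vectors (W = W1 + W2, Wm = W1*m1 + W2*m2), which is
        # why the result is built in information form rather than via the
        # mean vector and covariance matrix.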
def __iadd__(self, other):
"""Method for augmented addition.
Args:
other: Summand for the Gaussian random variable.
Returns:
A new Gaussian random variable representing the summation.
"""
return self.__add__(other)
def __isub__(self, other):
"""Method for augmented subtraction.
Args:
other: Subtrahend for the Gaussian random variable.
Returns:
A new Gaussian random variable representing the subtraction.
"""
return self.__sub__(other)
def __imul__(self, other):
"""Method for augmented multiplication.
Args:
other: Multiplier for the Gaussian random variable.
Returns:
A new Gaussian random variable representing the multiplication.
"""
return self.__mul__(other)
def __eq__(self, other):
"""Compare self with other and return the boolean result.
Two Gaussian random variables are equal only if the mean vectors and
        the covariance matrices are equal and their dimensions are in the
        same order.
"""
return np.allclose(self._W, other._W) \
and np.allclose(self._Wm, other._Wm) \
and self.dim == other.dim
def normalize(self):
"""Normalize probability density function."""
return self
def marginalize(self, *dims, normalize=True):
"""Return the marginal for given dimensions.
The probability density function of the Gaussian random variable
is marginalized along the given dimensions.
Args:
*dims: Instances of Gaussian random variables, which should be
marginalized out.
normalize: Boolean flag if probability mass function should be
normalized after marginalization.
Returns:
A new Gaussian random variable representing the marginal.
"""
axis = tuple(idx for idx, d in enumerate(self.dim) if d not in dims)
mean = self.mean[np.ix_(axis, [0])]
cov = self.cov[np.ix_(axis, axis)]
new_dims = tuple(d for d in self.dim if d not in dims)
return Gaussian(mean, cov, *new_dims)
def maximize(self, *dims):
"""Return the maximum for given dimensions.
The probability density function of the Gaussian random variable
is maximized along the given dimensions.
Args:
*dims: Instances of Gaussian random variables, which should be
maximized out.
Returns:
A new Gaussian random variable representing the maximum.
"""
axis = tuple(idx for idx, d in enumerate(self.dim) if d not in dims)
mean = self.mean[np.ix_(axis, [0])]
cov = self.cov[np.ix_(axis, axis)]
new_dims = tuple(d for d in self.dim if d not in dims)
return Gaussian(mean, cov, *new_dims)
def argmax(self, dim=None):
"""Return the dimension index of the maximum.
Args:
            dim: An optional Gaussian random variable along which a
                marginalization is performed first; the maximum is then
                searched over the remaining dimensions. If None, the maximum
                is searched along all dimensions.
Returns:
An integer representing the dimension of the maximum.
"""
if dim is None:
return self.mean
m = self.marginalize(dim)
return m.mean
def log(self):
"""Natural logarithm of the Gaussian random variable.
Returns:
A new Gaussian random variable with the natural logarithm of the
probability density function.
"""
raise NotImplementedError
|
|
#-*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from unittest import TestCase
__all__ = ['TestWithBonitaServer','TestWithMockedServer',
'build_dumb_bonita_error_body','build_bonita_user_xml',
'build_bonita_group_xml','build_bonita_role_xml',
'build_bonita_membership_xml','build_xml_set','build_xml_list']
class TestWithBonitaServer(TestCase):
def __init__(self,methodName='runTest'):
import pybonita
pybonita.BonitaServer = pybonita.BonitaServer
super(TestWithBonitaServer,self).__init__(methodName)
class TestWithMockedServer(TestCase):
def __init__(self,methodName='runTest'):
from pybonita import BonitaServer
import pybonita.tests
BonitaServer._instance = pybonita.tests.BonitaMockedServerImpl()
BonitaServer.__metaclass__ = pybonita.tests._MetaBonitaMockedServer
BonitaServer.set_response_list = set_response_list
super(TestWithMockedServer,self).__init__(methodName)
class _MetaBonitaMockedServer(type):
def __instancecheck__(self, inst):
return isinstance(inst, BonitaMockedServerImpl)
class BonitaMockedServerImpl(object):
__metaclass__ = _MetaBonitaMockedServer
def __init__(self):
self._user = "john"
self._host = None
self._port = None
self._login = None
self._password = None
self._ready = False
def sendRESTRequest(self, url, user=None, data=dict()):
""" Do not call a BonitaServer, but rather access the reponses list given prior to this method call.
"""
import re
from pybonita.exception import BonitaHTTPError
(status, content_type, data) = self.__class__.extract_response(url,'POST')
if int(status)/100 != 2:
soup = BeautifulSoup(data,'xml')
if soup == None:
raise Exception('data : %s[%s] and can\'t build soup with that' % (str(data),type(data)))
soup_exception = soup.find(name=re.compile("Exception"))
bonita_exception = soup_exception.name if 'name' in dir(soup_exception) else unicode(soup_exception)
message = soup.detailmessage.string if soup.detailmessage is not None else ''
code = soup.errorCode
raise BonitaHTTPError(bonita_exception,code,message)
return data
def _get_host(self):
return self._host
def _set_host(self, value):
if type(value) != str:
raise TypeError(u"host must be a string")
self._host = value
host = property(_get_host, _set_host, None, u"Bonita REST API server host")
def _get_port(self):
return self._port
def _set_port(self, value):
if type(value) != int:
raise TypeError(u"port must be an integer")
self._port = value
port = property(_get_port, _set_port, None, u"Bonita REST API server port")
def _get_login(self):
return self._login
def _set_login(self, value):
if type(value) != str:
raise TypeError(u"login must be a string")
self._login = value
login = property(_get_login, _set_login, None, u"Bonita REST request credential login")
def _get_password(self):
return self._password
def _set_password(self, value):
if type(value) != str:
raise TypeError
self._password = value
password = property(_get_password, _set_password, None, u"Bonita REST request credential password")
class ResponseList(object):
# Internal singleton class for responses list
def __init__(self):
self.responses = {}
def clear_responses(self):
self.responses = {}
def add_or_augment_response_list(self,url,method,status,type,message):
if not (url,method) in self.responses:
self.responses[(url,method)] = [{'status':status,'type':type,'message':message}]
else:
# Add the new entry at end of list of responses for the current (url,method)
self.responses[(url,method)].append({'status':status,'type':type,'message':message})
def get_responses(self):
return self.responses
def search_and_consume_reponse(self,full_url,method):
"""
"""
# Look for url,method in MockedServer responses
from requests.packages import urllib3
parse = urllib3.util.parse_url(full_url).path
split_parse = parse.split('/')
n = len(split_parse)
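            # Prefix matching (illustrative): a response registered for
            # ('/identityAPI/getUser', 'POST') is consumed for the full URL
            # 'http://host/identityAPI/getUser/john', because the path is
            # trimmed one segment at a time until a registered key matches.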
while n > 0 and ('/'.join(split_parse[0:n]),'POST') not in self.responses:
n -= 1
if n == 0:
                raise Exception('No response has been set for url %s and method %s' % (parse,method))
url_parse = '/'.join(split_parse[0:n])
# Extract the first response in row
url_method_responses = self.responses.pop((url_parse,method))
current_response = url_method_responses[0]
if len(url_method_responses) > 1:
self.responses[(url_parse,method)] = url_method_responses[1:]
status = str(current_response['status'])
data = current_response['message']
            if current_response['type'] not in (None, ''):
                content_type = current_response['type']
            elif isinstance(data, (str, unicode)):
content_type = 'text/html'
else:
raise Exception('content_type not specified for url %s method %s' % (url_parse,method))
return status, content_type, data
response_list_instance = None
@classmethod
def clear_response_list(cls):
response_list = cls.get_response_list()
response_list.clear_responses()
@classmethod
def left_responses_in_list(cls):
return len(cls.get_response_list().get_responses()) > 0
@classmethod
def get_response_list(cls):
if cls.response_list_instance == None:
cls.response_list_instance = cls.ResponseList()
return cls.response_list_instance
@classmethod
def extract_response(cls,url,method):
""" Retrieve a response from already sets response list
"""
if len(cls.get_response_list().get_responses()) == 0:
# No already set response
raise Exception('No prepared response list')
else:
# Already set responses : find the good one
status,content_type,data = cls.get_response_list().search_and_consume_reponse(url,method)
return status,content_type,data
@classmethod
def set_response_list(cls,response_list):
""" Set the response list for next requests.
        :param response_list: list of entries as detailed below
        :raise: ValueError if response_list is not a list or an entry does not match the expected schema
An Entry is a dict containing :
:base_url: base URL the Request will be call (any further params will be ignored)
:method: the HTTP method of the Request. If not specified, default will be POST
:status: the HTTP response status
:type: the HTTP response mime type. If not specified, default will be xml
:message: the HTTP response body
An Entry could also be a list of only 3 params :
:0: base_url
:1: status
:2: message
"""
if not isinstance(response_list,list):
raise ValueError('response_list arg must be a list')
# Run through the responses
for response in response_list:
if not isinstance(response,(list,dict)):
raise ValueError('response_list entry must be a list or dict')
if isinstance(response,list):
if len(response) != 3:
raise ValueError('response_list entry must have 3 fields')
# Grab each fields
url = response[0]
method = 'POST'
status = response[1]
type = 'xml'
message = response[2]
else:
# response is a dict
url = response.get('base_url')
method = response.get('method','POST')
status = response.get('status')
type = response.get('type','xml')
message = response.get('message')
response_list = BonitaMockedServerImpl.get_response_list()
response_list.add_or_augment_response_list(url,method,status,type,message)
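# Usage sketch (URLs and XML bodies are illustrative, not real Bonita routes;
# tests normally reach this method through TestWithMockedServer, which binds
# it to BonitaServer.set_response_list):
#
#   BonitaMockedServerImpl.set_response_list([
#       ['/identityAPI/getUserByUUID', 200,
#        build_bonita_user_xml('some-uuid', username='john')],
#   ])
#   # the next sendRESTRequest() whose path starts with that base_url
#   # returns the prepared body with status 200.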
from pybonita.user import BonitaGroup, BonitaRole, BonitaMembership
from pybonita.utils import xml_find
def build_dumb_bonita_error_body(exception='',code='',message=''):
# Add your own Bonita java Exception in this dict to make your call shorter
# So you can call with exception='UserNotFoundException'
# rather than exception = 'org.ow2.bonita.facade.exception.UserNotFoundException'
java_exception_dict = {'UserNotFoundException':'org.ow2.bonita.facade.exception.UserNotFoundException',
'ProcessNotFoundException':'org.ow2.bonita.facade.exception.ProcessNotFoundException',
'GroupNotFoundException':'org.ow2.bonita.facade.exception.GroupNotFoundException',
'RoleNotFoundException':'org.ow2.bonita.facade.exception.RoleNotFoundException'}
exception_text = java_exception_dict.get(exception,exception)
# Build XML body
soup = BeautifulSoup('','xml')
tag_exception = soup.new_tag(exception_text)
tag_code = soup.new_tag('errorCode')
tag_message = soup.new_tag('detailMessage')
tag_code.string = code
tag_message.string = message
soup.insert(0,tag_exception)
tag_exception.insert(0,tag_code)
tag_exception.insert(1,tag_message)
return unicode(soup)
def build_bonita_user_xml(uuid,password='',username='',additional_properties = {}):
""" Build XML for a Bonita User information """
# Build XML body
soup = BeautifulSoup('','xml')
tag_user = soup.new_tag('user')
tag_uuid = soup.new_tag('uuid')
tag_password = soup.new_tag('password')
tag_username = soup.new_tag('username')
tag_uuid.string = uuid
tag_password.string = password
tag_username.string = username
user_tags = [tag_uuid,tag_password,tag_username]
for tag in user_tags:
tag_user.append(tag)
# Extract memberships
memberships = additional_properties.pop('memberships',[])
# Add additional properties
for (property_key, property_value) in additional_properties.iteritems():
# Create an additional tag
tag_property = soup.new_tag(property_key)
tag_property.string = property_value
# Add the new property to the User tag
tag_user.append(tag_property)
# Add memberships
for membership in memberships:
tag_memberships = soup.new_tag('memberships')
if isinstance(membership, BonitaMembership):
membership_xml = build_bonita_membership_xml(membership.uuid,membership.role,membership.group)
membership_soup = BeautifulSoup(membership_xml,'xml')
tag_memberships.append(xml_find(membership_soup,'membership'))
tag_user.append(tag_memberships)
return unicode(tag_user)
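# Rough shape of the XML produced by build_bonita_user_xml (element order as
# built above; values are illustrative):
#   <user><uuid>u-1</uuid><password>pw</password><username>john</username>
#     <memberships><Membership>...</Membership></memberships></user>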
def build_bonita_group_xml(uuid, name, description='', label='', dbid='', parent=None, as_parent=False, with_class=False):
""" Build XML for a Bonita Group information
:param uuid:
:type uuid:
:param name:
:type name:
:param description:
:type description:
:param label:
:type label:
:param dbid:
:type dbid:
    :param parent: parent of this Group, either as a BonitaGroup or as XML
    :type parent: BonitaGroup or unicode
:param as_parent: State that the XML group must be provided as a parentGroup (default False)
:type as_parent: bool
"""
# Build XML body
soup = BeautifulSoup('','xml')
if as_parent:
tag_group = soup.new_tag('parentGroup')
else:
tag_group = soup.new_tag('Group')
if as_parent or with_class:
tag_group.attrs['class']='Group'
tag_uuid = soup.new_tag('uuid')
tag_description = soup.new_tag('description')
tag_name =soup.new_tag('name')
tag_label = soup.new_tag('label')
tag_dbid = soup.new_tag('dbid')
tag_uuid.string = uuid
tag_description.string = description
tag_name.string = name
tag_label.string = label
tag_dbid.string = dbid
group_tags = [tag_uuid,tag_description,tag_name,tag_label,tag_dbid]
for tag in group_tags:
tag_group.append(tag)
if parent:
if isinstance(parent,BonitaGroup):
# Build parent XML definition
            parent_xml = build_bonita_group_xml(parent.uuid, parent.name,
                                                parent.description, parent.label,
                                                parent=parent.parent, as_parent=True)
else:
# parent XML is directly in parent
parent_xml = parent
parent_soup = BeautifulSoup(parent_xml,'xml')
tag_group.append(parent_soup.parentGroup)
return unicode(tag_group)
def build_bonita_process_definition_xml(uuid, name=None, version=None, label=None, description=None):
soup = BeautifulSoup('','xml')
tag_process = soup.new_tag("ProcessDefinition")
tag_description = soup.new_tag("description")
tag_description.string = description if description != None else "%s description" % uuid
tag_name = soup.new_tag("name")
tag_name.string = name if name != None else uuid.split("--")[0]
tag_label = soup.new_tag("label")
tag_label.string = label if label != None else uuid.split("--")[0]
tag_uuid = soup.new_tag("uuid")
tag_value = soup.new_tag("value")
tag_value.string = uuid
tag_uuid.append(tag_value)
tag_version = soup.new_tag("version")
tag_version.string = version if version != None else uuid.split("--")[1]
process_tags = [tag_description, tag_name, tag_label, tag_uuid, tag_version]
for tag in process_tags:
tag_process.append(tag)
return unicode(tag_process)
def build_bonita_role_xml(uuid,name,description='',label='',dbid='',with_class=False):
""" Build XML for a Bonita Role information """
# Build XML body
soup = BeautifulSoup('','xml')
tag_role = soup.new_tag('Role')
if with_class:
tag_role.attrs['class']='Role'
tag_uuid = soup.new_tag('uuid')
tag_name = soup.new_tag('name')
tag_description = soup.new_tag('description')
tag_label = soup.new_tag('label')
tag_dbid = soup.new_tag('dbid')
tag_uuid.string = uuid
tag_name.string = name
tag_description.string = description
tag_label.string = label
tag_dbid.string = dbid
role_tags = [tag_uuid,tag_name,tag_description,tag_label,tag_dbid]
for tag in role_tags:
tag_role.append(tag)
return unicode(tag_role)
def build_bonita_membership_xml(uuid,role,group, dbid=''):
""" Build XML for a Bonita Membership information """
# Build XML body
soup = BeautifulSoup('','xml')
tag_membership = soup.new_tag('Membership')
tag_uuid = soup.new_tag('uuid')
tag_dbid = soup.new_tag('dbid')
tag_uuid.string = uuid
tag_dbid.string = dbid
membership_tags = [tag_uuid,tag_dbid]
for tag in membership_tags:
tag_membership.append(tag)
if isinstance(group,BonitaGroup):
# Build group XML definition
group_xml = build_bonita_group_xml(group.uuid, group.name, group.description, group.label,parent=group.parent,with_class=True)
group_soup = BeautifulSoup(group_xml,'xml')
tag_membership.append(xml_find(group_soup,'group'))
else:
# group XML is directly in group param
group_soup = BeautifulSoup(group,'xml')
tag_membership.append(group_soup.contents[0])
if isinstance(role,BonitaRole):
# Build group XML definition
role_xml = build_bonita_role_xml(role.uuid,role.name,role.description,role.label,role.dbid if 'dbid' in dir(role) else '',with_class=True)
role_soup = BeautifulSoup(role_xml,'xml')
tag_membership.append(xml_find(role_soup,'role'))
else:
        # role XML is directly in role param
role_soup = BeautifulSoup(role,'xml')
tag_membership.append(role_soup.contents[0])
return unicode(tag_membership)
def build_xml_set(xml_list):
""" Build a Bonita Set of element """
# Build XML body
soup = BeautifulSoup('','xml')
tag_set = soup.new_tag('set')
for xml_elem in xml_list:
elem_soup = BeautifulSoup(xml_elem,'xml')
tag_set.append(elem_soup.contents[0])
return unicode(tag_set)
def build_xml_list(xml_list):
""" Build a Bonita List of element """
# Build XML body
soup = BeautifulSoup('','xml')
    tag_list = soup.new_tag('list')
    for xml_elem in xml_list:
        elem_soup = BeautifulSoup(xml_elem,'xml')
        tag_list.append(elem_soup.contents[0])
    return unicode(tag_list)
|
|
"""Tests for hyp2f1 for complex values.
Author: Albert Steppi, with credit to Adam Kullberg (FormerPhysicist) for
the implementation of mp_hyp2f1 below, which modifies mpmath's hyp2f1 to
return the same branch as scipy's on the standard branch cut.
"""
import pytest
import numpy as np
from typing import NamedTuple
from numpy.testing import assert_allclose
from scipy.special import hyp2f1
from scipy.special._testutils import check_version, MissingModule
try:
import mpmath
except ImportError:
mpmath = MissingModule("mpmath")
def mp_hyp2f1(a, b, c, z):
"""Return mpmath hyp2f1 calculated on same branch as scipy hyp2f1.
For most values of a,b,c mpmath returns the x - 0j branch of hyp2f1 on the
branch cut x=(1,inf) whereas scipy's hyp2f1 calculates the x + 0j branch.
Thus, to generate the right comparison values on the branch cut, we
evaluate mpmath.hyp2f1 at x + 1e-15*j.
The exception to this occurs when c-a=-m in which case both mpmath and
scipy calculate the x + 0j branch on the branch cut. When this happens
mpmath.hyp2f1 will be evaluated at the original z point.
"""
on_branch_cut = z.real > 1.0 and abs(z.imag) < 1.0e-15
cond1 = abs(c - a - round(c - a)) < 1.0e-15 and round(c - a) <= 0
cond2 = abs(c - b - round(c - b)) < 1.0e-15 and round(c - b) <= 0
# Make sure imaginary part is *exactly* zero
if on_branch_cut:
z = z.real + 0.0j
if on_branch_cut and not (cond1 or cond2):
z_mpmath = z.real + 1.0e-15j
else:
z_mpmath = z
return complex(mpmath.hyp2f1(a, b, c, z_mpmath))
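# Illustrative check (values are not part of the test data below): on the
# branch cut, e.g. z = 1.5 + 0j with a, b, c = 0.5, 0.2, 1.3, mp_hyp2f1
# evaluates mpmath.hyp2f1 at 1.5 + 1e-15j so that it picks the same
# x + 0j branch that scipy.special.hyp2f1 returns at 1.5 + 0j.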
class Hyp2f1TestCase(NamedTuple):
a: float
b: float
c: float
z: complex
expected: complex
rtol: float
class TestHyp2f1:
"""Tests for hyp2f1 for complex values.
Expected values for test cases were computed using mpmath. See
`scipy.special._precompute.hyp2f1_data`. The verbose style of specifying
test cases is used for readability and to make it easier to mark individual
cases as expected to fail. Expected failures are used to highlight cases
where improvements are needed. See
`scipy.special._precompute.hyp2f1_data.make_hyp2f1_test_cases` for a
function to generate the boilerplate for the test cases.
Assertions have been added to each test to ensure that the test cases match
the situations that are intended. A final test `test_test_hyp2f1` checks
that the expected values in the test cases actually match what is computed
by mpmath. This test is marked slow even though it isn't particularly slow
so that it won't run by default on continuous integration builds.
"""
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=-10,
z=0.2 + 0.2j,
expected=np.inf + 0j,
rtol=0
)
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=-10,
z=0 + 0j,
expected=1 + 0j,
rtol=0
),
marks=pytest.mark.xfail(reason="gh-7340")
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0,
c=-10,
z=0.2 + 0.2j,
expected=1 + 0j,
rtol=0
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0,
c=0,
z=0.2 + 0.2j,
expected=1 + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=0,
z=0.2 + 0.2j,
expected=np.inf + 0j,
rtol=0,
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=0,
z=0 + 0j,
expected=np.nan + 0j,
rtol=0,
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=-5,
c=-10,
z=0.2 + 0.2j,
expected=(1.0495404166666666+0.05708208333333334j),
rtol=1e-15,
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=-10,
c=-10,
z=0.2 + 0.2j,
expected=(1.092966013125+0.13455014673750001j),
rtol=1e-15,
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
pytest.param(
Hyp2f1TestCase(
a=-10,
b=-20,
c=-10,
z=0.2 + 0.2j,
expected=(-0.07712512000000005+0.12752814080000005j),
rtol=1e-13,
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
pytest.param(
Hyp2f1TestCase(
a=-1,
b=3.2,
c=-1,
z=0.2 + 0.2j,
expected=(1.6400000000000001+0.6400000000000001j),
rtol=1e-13,
),
marks=pytest.mark.xfail(reason="gh-7340"),
),
]
)
def test_c_non_positive_int(self, hyp2f1_test_case):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=1.5,
z=1 + 0j,
expected=1.1496439092239847 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=12.3,
b=8.0,
c=20.31,
z=1 + 0j,
expected=69280986.75273195 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=290.2,
b=321.5,
c=700.1,
z=1 + 0j,
expected=1.3396562400934e117 + 0j,
rtol=1e-12,
),
marks=pytest.mark.xfail(reason="overflow"),
),
pytest.param(
Hyp2f1TestCase(
a=-102.1,
b=-20.3,
c=1.3,
z=1 + 0j,
expected=2.7899070752746906e22 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=-202.6,
b=60.3,
c=1.5,
z=1 + 0j,
expected=-1.3113641413099326e-56 + 0j,
rtol=1e-12,
),
marks=pytest.mark.xfail(reason="underflow"),
),
],
)
def test_unital_argument(self, hyp2f1_test_case):
"""Tests for case z = 1, c - a - b > 0.
Expected answers computed using mpmath.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert z == 1 and c - a - b > 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=1.3,
z=-1 + 0j,
expected=0.9428846409614143 + 0j,
rtol=1e-15),
),
pytest.param(
Hyp2f1TestCase(
a=12.3,
b=8.0,
c=5.300000000000001,
z=-1 + 0j,
expected=-4.845809986595704e-06 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=221.5,
b=90.2,
c=132.3,
z=-1 + 0j,
expected=2.0490488728377282e-42 + 0j,
rtol=1e-7,
),
),
pytest.param(
Hyp2f1TestCase(
a=-102.1,
b=-20.3,
c=-80.8,
z=-1 + 0j,
expected=45143784.46783885 + 0j,
rtol=1e-7,
),
marks=pytest.mark.xfail,
),
],
)
def test_special_case_z_near_minus_1(self, hyp2f1_test_case):
"""Tests for case z ~ -1, c ~ 1 + a - b
Expected answers computed using mpmath.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(1 + a - b - c) < 1e-15 and abs(z + 1) < 1e-15
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-4,
b=2.02764642551431,
c=1.0561196186065624,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0031961077109535375-0.0011313924606557173j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-8,
b=-7.937789122896016,
c=-15.964218273004214,
z=(2-0.10526315789473695j),
expected=(0.005543763196412503-0.0025948879065698306j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-8,
b=8.095813935368371,
c=4.0013768449590685,
z=(0.9473684210526314-0.10526315789473695j),
expected=(-0.0003054674127221263-9.261359291755414e-05j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-4,
b=-3.956227226099288,
c=-3.9316537064827854,
z=(1.1578947368421053-0.3157894736842106j),
expected=(-0.0020809502580892937-0.0041877333232365095j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-4,
c=2.050308316530781,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0011282435590058734+0.0002027062303465851j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-8,
c=-15.964218273004214,
z=(1.3684210526315788+0.10526315789473673j),
expected=(-9.134907719238265e-05-0.00040219233987390723j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-4,
c=4.0013768449590685,
z=(0.9473684210526314-0.10526315789473695j),
expected=(-0.000519013062087489-0.0005855883076830948j),
rtol=5e-12,
),
),
]
)
def test_a_b_negative_int(self, hyp2f1_test_case):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert a == int(a) and a < 0 or b == int(b) and b < 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-0.9629749245209605,
c=-15.5,
z=(1.1578947368421053-1.1578947368421053j),
expected=(0.9778506962676361+0.044083801141231616j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-3.9316537064827854,
c=1.5,
z=(0.9473684210526314-0.10526315789473695j),
expected=(4.0793167523167675-10.11694246310966j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-0.9629749245209605,
c=2.5,
z=(1.1578947368421053-0.10526315789473695j),
expected=(-2.9692999501916915+0.6394599899845594j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-0.9629749245209605,
c=-15.5,
z=(1.5789473684210522-1.1578947368421053j),
expected=(0.9493076367106102-0.04316852977183447j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.5,
c=-15.5,
z=(0.5263157894736841+0.10526315789473673j),
expected=(0.9844377175631795-0.003120587561483841j),
rtol=1e-10,
),
),
],
)
def test_a_b_neg_int_after_euler_hypergeometric_transformation(
self, hyp2f1_test_case
):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert ( # Tests the test
(abs(c - a - int(c - a)) < 1e-15 and c - a < 0) or
(abs(c - b - int(c - b)) < 1e-15 and c - b < 0)
)
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.10526315789473673-0.3157894736842106j),
expected=(0.9941449585778349+0.01756335047931358j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.5263157894736841+0.5263157894736841j),
expected=(1.0388722293372104-0.09549450380041416j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.0561196186065624,
c=-7.93846038215665,
z=(0.10526315789473673+0.7368421052631575j),
expected=(2.1948378809826434+24.934157235172222j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=8.031683612216888,
z=(0.3157894736842106-0.736842105263158j),
expected=(-0.4075277891264672-0.06819344579666956j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=2.050308316530781,
c=8.031683612216888,
z=(0.7368421052631575-0.10526315789473695j),
expected=(2.833535530740603-0.6925373701408158j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=4.078873014294075,
z=(0.10526315789473673-0.3157894736842106j),
expected=(1.005347176329683-0.3580736009337313j),
rtol=5e-16,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.3157894736842106-0.5263157894736843j),
expected=(0.9824353641135369+0.029271018868990268j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-159.63511401609862,
z=(0.3157894736842106-0.5263157894736843j),
expected=(0.9982436200365834+0.002927268199671111j),
rtol=1e-7,
),
marks=pytest.mark.xfail(reason="Poor convergence.")
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=8.031683612216888,
z=(0.5263157894736841-0.5263157894736843j),
expected=(-0.6906825165778091+0.8176575137504892j),
rtol=5e-13,
),
),
]
)
def test_region1(self, hyp2f1_test_case):
"""|z| < 0.9 and real(z) >= 0."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(z) < 0.9 and z.real >= 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.0561196186065624,
c=4.078873014294075,
z=(-0.3157894736842106+0.7368421052631575j),
expected=(0.7751915029081136+0.24068493258607315j),
rtol=5e-15,
),
),
]
)
def test_region2(self, hyp2f1_test_case):
"""|z| < 1 and real(z) < 0."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(z) < 1 and z.real < 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.xfail(reason="gh-8054")
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=1.0561196186065624,
c=-0.906685989801748,
z=(0.10526315789473673-0.9473684210526316j),
expected=(-3.9995506969395858-8.179533155338005j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=4.5,
c=8.077282662161238,
z=(0.3157894736842106+0.9473684210526314j),
expected=(-0.11307039404123598-0.443195310438102j),
rtol=1e-12,
),
),
]
)
def test_region4(self, hyp2f1_test_case):
"""0.9 <= |z| <= 1 and |1 - z| >= 1.
        This region is not handled by any of the standard transformations and
needs special care.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert 0.9 <= abs(z) <= 1 and abs(1 - z) >= 1 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
# Marked as slow so it won't run by default. This test is not slow.
# Including it only increases the running time of the entire suite by
    # a handful of hundredths of seconds. This test could become slow in the
# future if enough test cases are added.
@pytest.mark.slow
@check_version(mpmath, "1.0.0")
def test_test_hyp2f1(self):
"""Test that expected values match what is computed by mpmath.
This gathers the parameters for the test cases out of the pytest marks.
The parameters are a, b, c, z, expected, rtol, where expected should
be the value of hyp2f1(a, b, c, z) computed with mpmath. The test
recomputes hyp2f1(a, b, c, z) using mpmath and verifies that expected
actually is the correct value. This allows the data for the tests to
live within the test code instead of an external datafile, while
avoiding having to compute the results with mpmath during the test,
except for when slow tests are being run.
"""
test_methods = [
test_method for test_method in dir(self)
if test_method.startswith('test') and
# Filter properties and attributes (futureproofing).
callable(getattr(self, test_method)) and
# Filter out this test
test_method != 'test_test_hyp2f1'
]
for test_method in test_methods:
params = self._get_test_parameters(getattr(self, test_method))
for a, b, c, z, expected, _ in params:
assert_allclose(mp_hyp2f1(a, b, c, z), expected, rtol=2.25e-16)
def _get_test_parameters(self, test_method):
"""Get pytest.mark parameters for a test in this class."""
return [
case.values[0] for mark in test_method.pytestmark
if mark.name == 'parametrize'
for case in mark.args[1]
]
|
|
# coding: utf-8
import datetime
import json
from base64 import b64encode
from django.conf import settings
from django.http import HttpRequest
from djoauth2.models import AccessToken
from djoauth2.models import AuthorizationCode
from djoauth2.models import Client
from djoauth2.models import Scope
from djoauth2.signals import refresh_token_used_after_invalidation
from djoauth2.tests.abstractions import DJOAuth2TestCase
class TestAccessTokenEndpoint(DJOAuth2TestCase):
def test_ssl_required_insecure_request_fails(self):
""" When SSL is required (as per spec), insecure requests should fail. """
self.initialize(scope_names=['verify', 'autologin'])
settings.DJOAUTH2_SSL_ONLY = True
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_ssl=False)
self.assert_token_failure(response, 400)
def test_ssl_required_secure_request_succeeds(self):
""" When SSL is required (as per spec), secure requests should succeed. """
self.initialize(scope_names=['verify', 'autologin'])
settings.DJOAUTH2_SSL_ONLY = True
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_ssl=True)
self.assert_token_success(response)
def test_no_ssl_required_secure_request_succeeds(self):
""" When SSL is NOT required (in violation of the spec), secure requests
should still succeed.
"""
self.initialize(scope_names=['verify', 'autologin'])
settings.DJOAUTH2_SSL_ONLY = False
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_ssl=True)
self.assert_token_success(response)
def test_no_ssl_required_insecure_request_succeeds(self):
""" When SSL is NOT required (in violation of the spec), insecure requests
should succeed.
"""
self.initialize(scope_names=['verify', 'autologin'])
settings.DJOAUTH2_SSL_ONLY = False
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_ssl=True)
self.assert_token_success(response)
def test_get_requests_fail(self):
""" The AccessToken endpoint should only accept POST requests. """
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client, authcode.value, method='GET')
self.assert_token_failure(response, 400)
def test_put_requests_fail(self):
""" The AccessToken endpoint should only accept POST requests. """
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client, authcode.value, method='PUT')
self.assert_token_failure(response, 400)
def test_header_auth_succeeds(self):
""" Clients should be able to successfully authenticate with HTTP Basic
Authentication, as described by
http://tools.ietf.org/html/rfc6749#section-2.3.1 .
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_header_auth=True)
self.assert_token_success(response)
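  # Illustrative sketch (not a test): per RFC 6749 section 2.3.1, the HTTP
  # Basic value used for client authentication is "Basic " followed by the
  # base64 encoding of "<client_id>:<client_secret>"; this is presumably the
  # header the test client builds when use_header_auth=True is passed.
  def _example_basic_auth_header(self):
    return {
      'HTTP_AUTHORIZATION': 'Basic ' + b64encode(
        '{}:{}'.format(self.client.key, self.client.secret)),
    }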
def test_malformed_header_auth_fails(self):
""" Requests attempting to authenticate with HTTP Basic Authentication
using a malformed header should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
header_data={
'HTTP_AUTHORIZATION': 'INVALID',
},
use_header_auth=True)
self.assert_token_failure(response, 400)
def test_header_auth_method_is_not_basic_fails(self):
""" Requests attempting to authenticate with non-Basic HTTP Authentication
should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
header_data={
'HTTP_AUTHORIZATION': 'Bearer ' + b64encode(
'{}:{}'.format(self.client.key, self.client.secret)),
},
use_header_auth=True)
self.assert_token_failure(response, 400)
def test_header_auth_value_is_malformed_fails(self):
""" Clients attempting to authenticate with HTTP Basic Authentication ...
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
header_data={
'HTTP_AUTHORIZATION': 'Basic ' + 'ThisIsInvalidBase64',
},
use_header_auth=True)
self.assert_token_failure(response, 400)
def test_including_authorization_in_request_uri_fails(self):
""" Clients must not include authorization parameters in the request URI,
as specified by http://tools.ietf.org/html/rfc6749#section-2.3.1 .
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_header_auth=True,
endpoint_uri="{}?client_id={}&client_secret={}".format(
self.oauth_client.token_endpoint,
self.client.key,
self.client.secret))
self.assert_token_failure(response, 400)
def test_body_auth_succeeds(self):
""" Clients may include authorization details in the body of the POST request,
as specified by http://tools.ietf.org/html/rfc6749#section-3.2.1 .
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_header_auth=False)
self.assert_token_success(response)
def test_multiple_types_of_authentication_fails(self):
""" Clients must only use one authentication method in each request, as
specified by http://tools.ietf.org/html/rfc6749#section-2.3 .
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
request_data = {
'grant_type': 'authorization_code',
'code': authcode.value,
      # Include authorization values in the request body
'client_id': self.client.key,
'client_secret' : self.client.secret,
}
headers = {
'wsgi.url_scheme': 'https' if self.oauth_client.ssl_only else 'http',
      # Include authorization values in the request header
'HTTP_AUTHORIZATION': 'Basic ' + b64encode(
'{}:{}'.format(self.client.key, self.client.secret))}
response = self.oauth_client.post(self.oauth_client.token_endpoint,
request_data, **headers)
self.assert_token_failure(response, 400)
def test_nonexistent_client_fails(self):
""" Requests that attempt to authenticate with a non-existent Client should
fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
self.client.delete()
self.assertFalse(
Client.objects.filter(key=self.client.key).exists())
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
use_header_auth=False)
self.assert_token_failure(response, 401)
def test_missing_secret_fails(self):
""" If the access token request does not include a secret, it should fail. """
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
# Override default client_secret param to not exist.
'client_secret' : None
})
self.assert_token_failure(response, 400)
def test_mismatched_client_and_secret_fails(self):
""" If the access token request includes a secret that doesn't match the
registered secret, the request should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
# Override default client_secret param to not match the client's registered
# secret.
mismatched_secret = self.client.secret + 'thischangesthevalue'
self.assertNotEqual(mismatched_secret, self.client.secret)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
'client_secret' : mismatched_secret
},
use_header_auth=True)
self.assert_token_failure(response, 401)
def test_invalid_grant_type_fails(self):
""" If a request is made without a valid grant type, the request should
fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
'grant_type': 'invalid'
})
self.assert_token_failure(response, 400)
def test_omitted_grant_type_fails(self):
""" If a request is made without a valid grant type, the request should
fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
'grant_type': None,
})
self.assert_token_failure(response, 400)
class TestRequestAccessTokenFromAuthorizationCode(DJOAuth2TestCase):
def test_request_without_code_value_fails(self):
""" A request with a "grant_type" of "authorization_code" that does not
also include a "code" parameter should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(
self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client2,
authcode.value,
data={
# Remove the code parameter from the request
'code': None,
})
self.assert_token_failure(response, 400)
def test_nonexistent_code_fails(self):
""" An request based on a non-existent AuthorizationCode value should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(
self.user, self.client)
authcode.delete()
self.assertFalse(
AuthorizationCode.objects.filter(value=authcode.value).exists())
response = self.oauth_client.request_token_from_authcode(
self.client2, authcode.value)
self.assert_token_failure(response, 400)
def test_mismatched_code_and_client_fails(self):
""" If the code authorized by a user is not associated with the OAuth
client making the access token request, the request will fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
default_client_authcode = self.create_authorization_code(
self.user, self.client)
# Prove that the second OAuth client does not have the same key or secret
# as the default OAuth client.
self.assertNotEqual(default_client_authcode.client.key, self.client2.key)
self.assertNotEqual(default_client_authcode.client.secret,
self.client2.secret)
response = self.oauth_client.request_token_from_authcode(
self.client2,
default_client_authcode.value)
self.assert_token_failure(response, 400)
def test_expired_code_fails(self):
""" If an authorization code is unused within its lifetime, an attempt to
use it will fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
# Modify the authcode's date_created timestamp to be sufficiently far in
# the past that it is now expired.
authcode = self.create_authorization_code(self.user, self.client)
authcode.date_created -= datetime.timedelta(seconds=authcode.lifetime)
authcode.save()
self.assertTrue(authcode.is_expired())
response = self.oauth_client.request_token_from_authcode(
self.client, authcode.value)
self.assert_token_failure(response, 400)
def test_reuse_of_single_authorization_code_fails_and_invalidates_previously_granted_tokens(self):
""" If an authorization code is used more than once, the authorization
server MUST deny the request and SHOULD revoke (when possible) all tokens
previously issued based on that authorization code.
-- http://tools.ietf.org/html/rfc6749#section-4.1.2
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client, authcode.value)
self.assert_token_success(response)
response2 = self.oauth_client.request_token_from_authcode(
self.client, authcode.value)
self.assert_token_failure(response2, 400)
authcode = AuthorizationCode.objects.get(pk=authcode.pk)
for access_token in authcode.access_tokens.all():
self.assertTrue(access_token.invalidated)
def test_no_redirect_uri_passed_defaults_to_registered_and_succeeds(self):
""" If the OAuth client has registered a redirect uri, it is OK to not
explicitly pass the same URI again.
"""
self.initialize(scope_names=['verify', 'autologin'])
# Create an authorization code without a redirect URI.
authcode = self.create_authorization_code(self.user, self.client, {
'redirect_uri' : None
})
# Override the default redirect param to not exist.
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
'redirect_uri' : None,
})
self.assert_token_success(response)
def test_passed_redirect_uri_matches_registered_and_succeeds(self):
""" If the OAuth client has registered a redirect uri, and the same
redirect URI is passed here, the request should succeed.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client, {
'redirect_uri' : self.client.redirect_uri
})
# Request an authorization token with the same redirect as the
# authorization code (the OAuth spec requires them to match.)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
'redirect_uri' : self.client.redirect_uri,
})
self.assert_token_success(response)
def test_passed_redirect_uri_does_not_match_registered_uri_and_fails(self):
""" If the OAuth client has registered a redirect uri, and passes a
different redirect URI to the access token request, the request will fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
# Request an authorization token with a redirect that is different than the
# one registered by the client.
authcode = self.create_authorization_code(self.user, self.client, {
'redirect_uri' : self.client.redirect_uri
})
different_redirect = 'https://NOTlocu.com'
self.assertNotEqual(different_redirect, self.client.redirect_uri)
response = self.oauth_client.request_token_from_authcode(
self.client,
authcode.value,
data={
'redirect_uri' : different_redirect,
})
self.assert_token_failure(response, 400)
def test_after_success_authorization_code_is_invalidated(self):
""" After successfully being exchanged for an AccessToken, an
AuthorizationCode should be marked as 'invalidated' so that it cannot be
used again.
"""
self.initialize(scope_names=['verify', 'autologin'])
authcode = self.create_authorization_code(self.user, self.client)
response = self.oauth_client.request_token_from_authcode(
self.client, authcode.value)
self.assert_token_success(response)
authcode_in_db = AuthorizationCode.objects.get(pk=authcode.pk)
self.assertTrue(authcode_in_db.invalidated)
class TestRequestAccessTokenFromRefreshToken(DJOAuth2TestCase):
def test_request_without_refresh_token_value_fails(self):
""" Requests with "grant_type" of "refresh_token" that do not include a
"refresh_token" value should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
response = self.oauth_client.request_token_from_refresh_token(
self.client, None)
self.assert_token_failure(response, 400)
def test_request_with_nonexistent_refresh_token_fails(self):
""" Requests with "grant_type" of "refresh_token" that include a
"refresh_token" value that does not exist should fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
refresh_token_value = 'doesnotexist'
self.assertFalse(
AccessToken.objects.filter(refresh_token=refresh_token_value).exists())
response = self.oauth_client.request_token_from_refresh_token(
self.client, refresh_token_value)
self.assert_token_failure(response, 400)
def test_request_with_mismatched_client_and_refresh_token_fails(self):
""" One client may not refresh another client's AccessToken. """
self.initialize(scope_names=['verify', 'autologin'])
default_client_access_token = self.create_access_token(
self.user, self.client)
self.assertNotEqual(default_client_access_token.client.key,
self.client2.key)
self.assertNotEqual(default_client_access_token.client.secret,
self.client2.secret)
    response = self.oauth_client.request_token_from_refresh_token(
        self.client2, default_client_access_token.refresh_token)
self.assert_token_failure(response, 400)
def test_reuse_of_refresh_token_fails(self):
""" Each refresh token can only be used once. Attempting to refresh with a
token that has already been used will result in a failure.
From http://tools.ietf.org/html/rfc6749#section-10.4 :
The authorization server MUST verify the binding between the refresh
token and client identity whenever the client identity can be
authenticated. For example, the authorization server could employ
refresh token rotation in which a new refresh token is issued with
every access token refresh response. The previous refresh token is
invalidated but retained by the authorization server.
"""
self.initialize(scope_names=['verify', 'autologin'])
access_token_1 = self.create_access_token(self.user, self.client)
response = self.oauth_client.request_token_from_refresh_token(
self.client, access_token_1.refresh_token)
self.assert_token_success(response)
response2 = self.oauth_client.request_token_from_refresh_token(
self.client, access_token_1.refresh_token)
self.assert_token_failure(response2, 400)
existing_token_filter = AccessToken.objects.filter(
refresh_token=access_token_1.refresh_token)
self.assertTrue(existing_token_filter.exists())
self.assertEqual(len(existing_token_filter), 1)
self.assertEqual(existing_token_filter[0].pk, access_token_1.pk)
self.assertTrue(existing_token_filter[0].invalidated)
def test_reuse_of_refresh_token_fails_and_fires_signal(self):
""" Our implementation should fire a
'refresh_token_used_after_invalidation' signal that users may listen to and
use to alert Clients that their refresh tokens have been accessed more than
    once. This is as recommended by
http://tools.ietf.org/html/rfc6749#section-10.4 :
If a refresh token is compromised and subsequently used by both the
attacker and the legitimate client, one of them will present an
invalidated refresh token, which will inform the authorization server
of the breach.
"""
self.initialize(scope_names=['verify', 'autologin'])
access_token = self.create_access_token(self.user, self.client)
access_token.invalidate()
self.assertTrue(access_token.invalidated)
self.received_signal = False
def invalidated_refresh_token_use_callback(signal,
sender,
access_token,
request,
**kwargs):
      # The ``access_token`` parameter shadows the outer variable of the same
      # name, so verify that the token delivered with the signal is the
      # invalidated one.
      self.assertTrue(access_token.invalidated)
self.assertIsInstance(request, HttpRequest)
self.received_signal = True
refresh_token_used_after_invalidation.connect(
invalidated_refresh_token_use_callback)
response = self.oauth_client.request_token_from_refresh_token(
self.client, access_token.refresh_token)
self.assert_token_failure(response, 400)
self.assertTrue(self.received_signal)
def test_tokens_not_refreshable_fails(self):
""" Attempts to refresh non-rereshable tokens should fail. """
self.initialize(scope_names=['verify', 'autologin'])
settings.DJOAUTH2_ACCESS_TOKENS_REFRESHABLE = False
access_token = self.create_access_token(self.user, self.client)
self.assertFalse(access_token.refreshable)
response = self.oauth_client.request_token_from_refresh_token(
self.client, access_token.refresh_token)
self.assert_token_failure(response, 400)
def test_request_with_no_scope_succeeds_with_scope_equivalent_to_original(self):
""" If an OAuth client makes a refresh token request without specifying the
scope, the client should receive a token with the same scopes as the
original.
Also, I was *this* close to naming this method
"test_xXxXx420HEADSHOT_noscope_SWAGYOLOxXxXx".
"""
self.initialize(scope_names=['verify', 'autologin'])
access_token = self.create_access_token(self.user, self.client)
response = self.oauth_client.request_token_from_refresh_token(
self.client,
access_token.refresh_token,
data={
'scope': None
})
self.assert_token_success(response)
refresh_data = json.loads(response.content)
self.assertEqual(refresh_data['scope'], self.oauth_client.scope_string)
def test_request_with_same_scope_as_original_token_succeeds(self):
""" A request for a new AccessToken made with a RefreshToken that includes
a scope parameter for the same scope as the existing
RefreshToken/AccessToken pair should succeed.
"""
self.initialize(scope_names=['verify', 'autologin'])
access_token_1 = self.create_access_token(self.user, self.client)
scope_string_1 = self.oauth_client.scope_string
response = self.oauth_client.request_token_from_refresh_token(
self.client,
access_token_1.refresh_token,
data={
'scope': scope_string_1,
})
self.assert_token_success(response)
scope_string_2 = json.loads(response.content).get('scope')
self.assertEqual(scope_string_1, scope_string_2)
def test_request_with_subset_of_initial_scope_fails(self):
""" If a new refresh token is issued, the refresh token scope MUST be
identical to that of the refresh token included by the client in the
request. -- http://tools.ietf.org/html/rfc6749#section-6
"""
scope_list_1 = ['verify', 'autologin']
self.initialize(scope_names=scope_list_1)
access_token_1 = self.create_access_token(self.user, self.client)
scope_string_1 = self.oauth_client.scope_string
scope_list_2 = scope_list_1[:1]
self.assertGreater(set(scope_list_1), set(scope_list_2))
response = self.oauth_client.request_token_from_refresh_token(
self.client,
access_token_1.refresh_token,
data={
'scope': ' '.join(scope_list_2),
})
self.assert_token_failure(response, 400)
def test_request_with_superset_of_initial_scope_fails(self):
""" If a new refresh token is issued, the refresh token scope MUST be
identical to that of the refresh token included by the client in the
request. -- http://tools.ietf.org/html/rfc6749#section-6
"""
scope_list_1 = ['verify', 'autologin']
self.initialize(scope_names=scope_list_1)
access_token_1 = self.create_access_token(self.user, self.client)
scope_string_1 = self.oauth_client.scope_string
scope_list_2 = scope_list_1 + ['example']
self.assertGreater(set(scope_list_2), set(scope_list_1))
response = self.oauth_client.request_token_from_refresh_token(
self.client,
access_token_1.refresh_token,
data={
'scope': ' '.join(scope_list_2),
})
self.assert_token_failure(response, 400)
def test_request_with_nonexistent_scope_fails(self):
""" Refresh requests that ask for access to non-existent Scopes should
fail.
"""
self.initialize(scope_names=['verify', 'autologin'])
access_token = self.create_access_token(self.user, self.client)
non_existent_scope_name = 'dne'
self.assertFalse(
Scope.objects.filter(name=non_existent_scope_name).exists())
response = self.oauth_client.request_token_from_refresh_token(
self.client,
access_token.refresh_token,
data={
'scope': non_existent_scope_name,
})
self.assert_token_failure(response, 400)
def test_after_success_refresh_token_is_invalidated(self):
""" After successfully being exchanged for an AccessToken, a refresh token
should be marked as 'invalidated' so that it cannot be used again.
"""
self.initialize(scope_names=['verify', 'autologin'])
access_token = self.create_access_token(self.user, self.client)
response = self.oauth_client.request_token_from_refresh_token(
self.client, access_token.refresh_token)
self.assert_token_success(response)
access_token_in_db = AccessToken.objects.get(pk=access_token.pk)
self.assertTrue(access_token_in_db.invalidated)
|
|
import tempfile
import unittest
import itertools
import nltk
from gensim import corpora
import numpy as np
from orangecontrib.text import preprocess
from orangecontrib.text.corpus import Corpus
from orangecontrib.text.preprocess import Preprocessor
def counted(f):
def wrapped(*args, **kwargs):
wrapped.calls += 1
return f(*args, **kwargs)
wrapped.calls = 0
return wrapped
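# Illustrative sketch: ``counted`` wraps a callable and records how many times
# it has been invoked; the tokenizer tests below use it to check that
# ``on_change`` fires when a pattern is reassigned.
def _counted_example():
    increment = counted(lambda x: x + 1)
    increment(1)
    increment(2)
    assert increment.calls == 2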
class PreprocessTests(unittest.TestCase):
sentence = "Human machine interface for lab abc computer applications"
def setUp(self):
self.corpus = Corpus.from_file('deerwester')
def test_string_processor(self):
class StripStringTransformer(preprocess.BaseTransformer):
@classmethod
def transform(cls, string):
return string[:-1]
p = Preprocessor(transformers=StripStringTransformer())
np.testing.assert_equal(p(self.corpus).tokens,
np.array([[doc[:-1]] for doc in self.corpus.documents]))
p = Preprocessor(transformers=[StripStringTransformer(),
preprocess.LowercaseTransformer()])
np.testing.assert_equal(p(self.corpus).tokens,
np.array([[doc[:-1].lower()] for doc in self.corpus.documents]))
self.assertRaises(TypeError, Preprocessor, string_transformers=1)
def test_tokenizer(self):
class SpaceTokenizer(preprocess.BaseTokenizer):
@classmethod
def tokenize(cls, string):
return string.split()
p = Preprocessor(tokenizer=SpaceTokenizer())
np.testing.assert_equal(p(self.corpus).tokens,
np.array([sent.split() for sent in self.corpus.documents]))
def test_token_normalizer(self):
class CapTokenNormalizer(preprocess.BaseNormalizer):
@classmethod
def normalize(cls, token):
return token.capitalize()
p = Preprocessor(normalizer=CapTokenNormalizer())
np.testing.assert_equal(p(self.corpus).tokens,
np.array([[sent.capitalize()] for sent in self.corpus.documents]))
def test_token_filter(self):
class SpaceTokenizer(preprocess.BaseTokenizer):
@classmethod
def tokenize(cls, string):
return string.split()
class LengthFilter(preprocess.BaseTokenFilter):
@classmethod
def check(cls, token):
return len(token) < 4
p = Preprocessor(tokenizer=SpaceTokenizer(), filters=LengthFilter())
np.testing.assert_equal(p(self.corpus).tokens,
np.array([[token for token in doc.split() if len(token) < 4]
for doc in self.corpus.documents]))
def test_inplace(self):
        p = Preprocessor(tokenizer=preprocess.RegexpTokenizer(r'\w'))
corpus = p(self.corpus, inplace=True)
self.assertIs(corpus, self.corpus)
corpus = p(self.corpus, inplace=False)
self.assertIsNot(corpus, self.corpus)
self.assertEqual(corpus, self.corpus)
        p = Preprocessor(tokenizer=preprocess.RegexpTokenizer(r'\w+'))
corpus = p(self.corpus, inplace=False)
self.assertIsNot(corpus, self.corpus)
self.assertNotEqual(corpus, self.corpus)
class TransformationTests(unittest.TestCase):
def test_call(self):
class ReverseStringTransformer(preprocess.BaseTransformer):
name = "Reverse"
def transform(self, string):
return string[::-1]
transformer = ReverseStringTransformer()
self.assertEqual(transformer('abracadabra'), 'arbadacarba')
self.assertEqual(transformer(['abra', 'cadabra']), ['arba', 'arbadac'])
self.assertRaises(TypeError, transformer, 1)
def test_str(self):
class ReverseStringTransformer(preprocess.BaseTransformer):
name = 'reverse'
def transform(self, string):
return string[::-1]
transformer = ReverseStringTransformer()
self.assertIn('reverse', str(transformer))
def test_lowercase(self):
transformer = preprocess.LowercaseTransformer()
self.assertEqual(transformer.transform('Abra'), 'abra')
self.assertEqual(transformer.transform('\u00C0bra'), '\u00E0bra')
def test_strip_accents(self):
transformer = preprocess.StripAccentsTransformer()
self.assertEqual(transformer.transform('Abra'), 'Abra')
self.assertEqual(transformer.transform('\u00C0bra'), 'Abra')
def test_html(self):
transformer = preprocess.HtmlTransformer()
self.assertEqual(transformer('<p>abra<b>cadabra</b><p>'), 'abracadabra')
def test_url_remover(self):
url_remover = preprocess.UrlRemover()
self.assertEqual(url_remover.transform('some link to https://google.com/'), 'some link to ')
self.assertEqual(url_remover.transform('some link to google.com'), 'some link to google.com')
class TokenNormalizerTests(unittest.TestCase):
def setUp(self):
self.stemmer = nltk.PorterStemmer().stem
def test_str(self):
stemmer = preprocess.PorterStemmer()
self.assertIn('porter', str(stemmer).lower())
stemmer = preprocess.SnowballStemmer('french')
self.assertIn('french', str(stemmer).lower())
def test_call(self):
word = "Testing"
tokens = ["Testing", "tokenized", "Sentence"]
stemmer = preprocess.PorterStemmer()
self.assertEqual(stemmer(word), self.stemmer(word))
self.assertEqual(stemmer(tokens),
[self.stemmer(token) for token in tokens])
def test_function(self):
stemmer = preprocess.BaseNormalizer()
stemmer.normalizer = lambda x: x[:-1]
self.assertEqual(stemmer.normalize('token'), 'toke')
def test_snowball(self):
stemmer = preprocess.SnowballStemmer()
stemmer.language = 'french'
token = 'voudrais'
self.assertEqual(stemmer(token), nltk.SnowballStemmer(language='french').stem(token))
def test_porter_with_bad_input(self):
stemmer = preprocess.PorterStemmer()
self.assertRaises(TypeError, stemmer, 10)
def test_lookup_normalize(self):
dln = preprocess.DictionaryLookupNormalizer(dictionary={'aka': 'also known as'})
self.assertEqual(dln.normalize('aka'), 'also known as')
class FilteringTests(unittest.TestCase):
def setUp(self):
self.corpus = Corpus.from_file('deerwester')
def test_str(self):
f = preprocess.StopwordsFilter('french')
self.assertIn('french', str(f).lower())
f = preprocess.FrequencyFilter(keep_n=None)
self.assertNotIn('none', str(f).lower())
f.max_df = .5
self.assertIn('0.5', str(f))
f.max_df = .2
self.assertIn('0.2', str(f))
f = preprocess.LexiconFilter()
self.assertIn('lexicon', str(f).lower())
def test_call(self):
class DigitsFilter(preprocess.BaseTokenFilter):
def check(self, token):
return not token.isdigit()
df = DigitsFilter()
self.assertEqual(df([]), [])
self.assertEqual(df(['a', '1']), ['a'])
self.assertEqual(df([['a', '1']]), [['a']])
def test_stopwords(self):
filter = preprocess.StopwordsFilter('english')
self.assertFalse(filter.check('a'))
self.assertTrue(filter.check('filter'))
def test_lexicon(self):
filter = preprocess.LexiconFilter(['filter'])
self.assertFalse(filter.check('false'))
self.assertTrue(filter.check('filter'))
def test_keep_n(self):
ff = preprocess.FrequencyFilter(keep_n=5)
p = Preprocessor(tokenizer=preprocess.RegexpTokenizer(r'\w+'),
filters=[ff])
processed = p(self.corpus)
self.assertEqual(len(set(itertools.chain(*processed.tokens))), 5)
def test_min_df(self):
ff = preprocess.FrequencyFilter(min_df=.5)
p = Preprocessor(tokenizer=preprocess.RegexpTokenizer(r'\w+'),
filters=[ff])
processed = p(self.corpus)
size = len(processed.documents)
self.assertFrequencyRange(processed, size * .5, size)
ff.min_df = 2
processed = p(self.corpus)
size = len(processed.documents)
self.assertFrequencyRange(processed, 2, size)
def test_max_df(self):
ff = preprocess.FrequencyFilter(max_df=.3)
p = Preprocessor(tokenizer=preprocess.RegexpTokenizer(r'\w+'),
filters=[ff])
size = len(self.corpus.documents)
corpus = p(self.corpus)
self.assertFrequencyRange(corpus, 1, size * .3)
ff.max_df = 2
corpus = p(self.corpus)
self.assertFrequencyRange(corpus, 1, 2)
def assertFrequencyRange(self, corpus, min_fr, max_fr):
dictionary = corpora.Dictionary(corpus.tokens)
self.assertTrue(all(min_fr <= fr <= max_fr
for fr in dictionary.dfs.values()))
def test_word_list(self):
lexicon = preprocess.LexiconFilter()
f = tempfile.NamedTemporaryFile()
f.write(b'hello\nworld\n')
f.flush()
lexicon.from_file(f.name)
self.assertIn('hello', lexicon.lexicon)
self.assertIn('world', lexicon.lexicon)
def test_regex_filter(self):
reg_filter = preprocess.RegexpFilter(r'.')
filtered = reg_filter(self.corpus.tokens[0])
self.assertFalse(filtered)
reg_filter.pattern = 'foo'
self.assertCountEqual(reg_filter(['foo', 'bar']), ['bar'])
reg_filter.pattern = '^http'
self.assertCountEqual(reg_filter(['https', 'http', ' http']), [' http'])
self.assertFalse(preprocess.RegexpFilter.validate_regexp('?'))
        self.assertTrue(preprocess.RegexpFilter.validate_regexp(r'\?'))
class TokenizerTests(unittest.TestCase):
def test_call(self):
class DashTokenizer(preprocess.BaseTokenizer):
@classmethod
def tokenize(cls, string):
return string.split('-')
tokenizer = DashTokenizer()
self.assertEqual(list(tokenizer('dashed-sentence')), ['dashed', 'sentence'])
self.assertEqual(list(tokenizer(['1-2-3', '-'])), [['1', '2', '3'], ['', '']])
self.assertRaises(TypeError, tokenizer, 1)
def test_tokenizer_instance(self):
class WhitespaceTokenizer(preprocess.BaseTokenizer):
tokenizer = nltk.WhitespaceTokenizer()
name = 'whitespace'
tokenizer = WhitespaceTokenizer()
sent = "Test \t tokenizer"
self.assertEqual(tokenizer.tokenize(sent),
nltk.WhitespaceTokenizer().tokenize(sent))
def test_call_with_bad_input(self):
        tokenizer = preprocess.RegexpTokenizer(pattern=r'\w+')
self.assertRaises(TypeError, tokenizer.tokenize, 1)
self.assertRaises(TypeError, tokenizer.tokenize, ['1', 2])
def test_valid_regexp(self):
        self.assertTrue(preprocess.RegexpTokenizer.validate_regexp(r'\w+'))
def test_invalid_regex(self):
for expr in ['\\', '[', ')?']:
self.assertFalse(preprocess.RegexpTokenizer.validate_regexp(expr))
def test_on_change(self):
tokenizer = preprocess.RegexpTokenizer(pattern=r'\w+')
tokenizer.on_change = counted(tokenizer.on_change)
tokenizer.pattern = r'\S+'
self.assertEqual(tokenizer.on_change.calls, 1)
self.assertEqual(tokenizer.pattern, r'\S+')
def test_str(self):
tokenizer = preprocess.RegexpTokenizer(pattern=r'\S+')
        self.assertIn(r'\S+', str(tokenizer))
def test_skip_empty_strings(self):
tokenizer = preprocess.RegexpTokenizer(pattern=r'[^h ]*')
tokens = tokenizer('whatever')
self.assertNotIn('', tokens)
|
|
import pytz
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import Q
from django.utils import timezone
from osf.models import Node
from osf.models import NodeLog
from osf.models.base import GuidMixin, Guid, BaseModel
from osf.models.mixins import CommentableMixin
from osf.models.spam import SpamMixin
from osf.models import validators
from framework.exceptions import PermissionsError
from osf.utils.fields import NonNaiveDateTimeField
from website import settings
from website.util import api_v2_url
from website.project import signals as project_signals
from website.project.model import get_valid_mentioned_users_guids
class Comment(GuidMixin, SpamMixin, CommentableMixin, BaseModel):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'website.project.model.Comment'
modm_query = None
# /TODO DELETE ME POST MIGRATION
__guid_min_length__ = 12
OVERVIEW = 'node'
FILES = 'files'
WIKI = 'wiki'
user = models.ForeignKey('OSFUser', null=True)
# the node that the comment belongs to
node = models.ForeignKey('AbstractNode', null=True)
# The file or project overview page that the comment is for
root_target = models.ForeignKey(Guid, on_delete=models.SET_NULL,
related_name='comments',
null=True, blank=True)
# the direct 'parent' of the comment (e.g. the target of a comment reply is another comment)
target = models.ForeignKey(Guid, on_delete=models.SET_NULL,
related_name='child_comments',
null=True, blank=True)
date_created = NonNaiveDateTimeField(default=timezone.now) # auto_now_add=True)
date_modified = NonNaiveDateTimeField(default=timezone.now) # auto_now=True)
modified = models.BooleanField(default=False)
is_deleted = models.BooleanField(default=False)
# The type of root_target: node/files
page = models.CharField(max_length=255, blank=True)
content = models.TextField(
validators=[validators.CommentMaxLength(settings.COMMENT_MAXLENGTH),
validators.string_required]
)
# The mentioned users
# TODO This should be made into an M2M STAT
ever_mentioned = ArrayField(models.CharField(max_length=10, blank=True), default=list, blank=True)
@property
def url(self):
return '/{}/'.format(self._id)
@property
def absolute_api_v2_url(self):
path = '/comments/{}/'.format(self._id)
return api_v2_url(path)
@property
def target_type(self):
"""The object "type" used in the OSF v2 API."""
return 'comments'
@property
def root_target_page(self):
"""The page type associated with the object/Comment.root_target."""
return None
def belongs_to_node(self, node_id):
"""Check whether the comment is attached to the specified node."""
return self.node._id == node_id
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
def get_comment_page_url(self):
if isinstance(self.root_target.referent, Node):
return self.node.absolute_url
return settings.DOMAIN + str(self.root_target._id) + '/'
def get_content(self, auth):
""" Returns the comment content if the user is allowed to see it. Deleted comments
can only be viewed by the user who created the comment."""
if not auth and not self.node.is_public:
raise PermissionsError
if self.is_deleted and ((not auth or auth.user.is_anonymous()) or
(auth and not auth.user.is_anonymous() and self.user._id != auth.user._id)):
return None
return self.content
def get_comment_page_title(self):
if self.page == Comment.FILES:
return self.root_target.referent.name
elif self.page == Comment.WIKI:
return self.root_target.referent.page_name
return ''
def get_comment_page_type(self):
if self.page == Comment.FILES:
return 'file'
elif self.page == Comment.WIKI:
return 'wiki'
return self.node.project_or_component
@classmethod
def find_n_unread(cls, user, node, page, root_id=None):
if node.is_contributor(user):
if page == Comment.OVERVIEW:
view_timestamp = user.get_node_comment_timestamps(target_id=node._id)
root_target = Guid.load(node._id)
elif page == Comment.FILES or page == Comment.WIKI:
view_timestamp = user.get_node_comment_timestamps(target_id=root_id)
root_target = Guid.load(root_id)
else:
raise ValueError('Invalid page')
if not view_timestamp.tzinfo:
view_timestamp = view_timestamp.replace(tzinfo=pytz.utc)
return cls.objects.filter(
Q(node=node) & ~Q(user=user) & Q(is_deleted=False) &
(Q(date_created__gt=view_timestamp) | Q(date_modified__gt=view_timestamp)) &
Q(root_target=root_target)
).count()
return 0
@classmethod
def create(cls, auth, **kwargs):
comment = cls(**kwargs)
if not comment.node.can_comment(auth):
raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
log_dict = {
'project': comment.node.parent_id,
'node': comment.node._id,
'user': comment.user._id,
'comment': comment._id,
}
if isinstance(comment.target.referent, Comment):
comment.root_target = comment.target.referent.root_target
else:
comment.root_target = comment.target
page = getattr(comment.root_target.referent, 'root_target_page', None)
if not page:
raise ValueError('Invalid root target.')
comment.page = page
log_dict.update(comment.root_target.referent.get_extra_log_params(comment))
if comment.content:
new_mentions = get_valid_mentioned_users_guids(comment, comment.node.contributors)
if new_mentions:
project_signals.mention_added.send(comment, new_mentions=new_mentions, auth=auth)
comment.ever_mentioned.extend(new_mentions)
comment.save()
comment.node.add_log(
NodeLog.COMMENT_ADDED,
log_dict,
auth=auth,
save=False,
)
comment.node.save()
project_signals.comment_added.send(comment, auth=auth)
return comment
def edit(self, content, auth, save=False):
if not self.node.can_comment(auth) or self.user._id != auth.user._id:
raise PermissionsError('{0!r} does not have permission to edit this comment'.format(auth.user))
log_dict = {
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
}
log_dict.update(self.root_target.referent.get_extra_log_params(self))
self.content = content
self.modified = True
self.date_modified = timezone.now()
new_mentions = get_valid_mentioned_users_guids(self, self.node.contributors)
if save:
if new_mentions:
project_signals.mention_added.send(self, new_mentions=new_mentions, auth=auth)
self.ever_mentioned.extend(new_mentions)
self.save()
self.node.add_log(
NodeLog.COMMENT_UPDATED,
log_dict,
auth=auth,
save=False,
)
self.node.save()
def delete(self, auth, save=False):
if not self.node.can_comment(auth) or self.user._id != auth.user._id:
raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
log_dict = {
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
}
self.is_deleted = True
log_dict.update(self.root_target.referent.get_extra_log_params(self))
self.date_modified = timezone.now()
if save:
self.save()
self.node.add_log(
NodeLog.COMMENT_REMOVED,
log_dict,
auth=auth,
save=False,
)
self.node.save()
def undelete(self, auth, save=False):
if not self.node.can_comment(auth) or self.user._id != auth.user._id:
raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
self.is_deleted = False
log_dict = {
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
}
log_dict.update(self.root_target.referent.get_extra_log_params(self))
self.date_modified = timezone.now()
if save:
self.save()
self.node.add_log(
NodeLog.COMMENT_RESTORED,
log_dict,
auth=auth,
save=False,
)
self.node.save()
@classmethod
def migrate_from_modm(cls, modm_obj):
django_obj = super(Comment, cls).migrate_from_modm(modm_obj)
keys = ['category', 'text', 'date', 'retracted']
for uid, value in django_obj.reports.iteritems():
for key in keys:
django_obj.reports[uid].setdefault(key)
return django_obj
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests transmission of tickets across gRPC-on-the-wire."""
import unittest
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc._links import service
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.interfaces.links import links
from grpc_test import test_common
from grpc_test._links import _proto_scenarios
from grpc_test.framework.common import test_constants
from grpc_test.framework.interfaces.links import test_cases
from grpc_test.framework.interfaces.links import test_utilities
_IDENTITY = lambda x: x
class TransmissionTest(test_cases.TransmissionTest, unittest.TestCase):
def create_transmitting_links(self):
service_link = service.service_link(
{self.group_and_method(): self.deserialize_request},
{self.group_and_method(): self.serialize_response})
port = service_link.add_port('[::]:0', None)
service_link.start()
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_link = invocation.invocation_link(
channel, 'localhost', None,
{self.group_and_method(): self.serialize_request},
{self.group_and_method(): self.deserialize_response})
invocation_link.start()
return invocation_link, service_link
def destroy_transmitting_links(self, invocation_side_link, service_side_link):
invocation_side_link.stop()
service_side_link.begin_stop()
service_side_link.end_stop()
def create_invocation_initial_metadata(self):
return (
('first_invocation_initial_metadata_key', 'just a string value'),
('second_invocation_initial_metadata_key', '0123456789'),
('third_invocation_initial_metadata_key-bin', '\x00\x57' * 100),
)
def create_invocation_terminal_metadata(self):
return None
def create_service_initial_metadata(self):
return (
('first_service_initial_metadata_key', 'just another string value'),
('second_service_initial_metadata_key', '9876543210'),
('third_service_initial_metadata_key-bin', '\x00\x59\x02' * 100),
)
def create_service_terminal_metadata(self):
return (
('first_service_terminal_metadata_key', 'yet another string value'),
('second_service_terminal_metadata_key', 'abcdefghij'),
('third_service_terminal_metadata_key-bin', '\x00\x37' * 100),
)
def create_invocation_completion(self):
return None, None
def create_service_completion(self):
return (
beta_interfaces.StatusCode.OK, b'An exuberant test "details" message!')
def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
self.assertTrue(
test_common.metadata_transmitted(
original_metadata, transmitted_metadata),
'%s erroneously transmitted as %s' % (
original_metadata, transmitted_metadata))
class RoundTripTest(unittest.TestCase):
def testZeroMessageRoundTrip(self):
test_operation_id = object()
test_group = 'test package.Test Group'
test_method = 'test method'
identity_transformation = {(test_group, test_method): _IDENTITY}
test_code = beta_interfaces.StatusCode.OK
test_message = 'a test message'
service_link = service.service_link(
identity_transformation, identity_transformation)
service_mate = test_utilities.RecordingLink()
service_link.join_link(service_mate)
port = service_link.add_port('[::]:0', None)
service_link.start()
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_link = invocation.invocation_link(
channel, None, None, identity_transformation, identity_transformation)
invocation_mate = test_utilities.RecordingLink()
invocation_link.join_link(invocation_mate)
invocation_link.start()
invocation_ticket = links.Ticket(
test_operation_id, 0, test_group, test_method,
links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
None, None, None, None, links.Ticket.Termination.COMPLETION, None)
invocation_link.accept_ticket(invocation_ticket)
service_mate.block_until_tickets_satisfy(test_cases.terminated)
service_ticket = links.Ticket(
service_mate.tickets()[-1].operation_id, 0, None, None, None, None,
None, None, None, None, test_code, test_message,
links.Ticket.Termination.COMPLETION, None)
service_link.accept_ticket(service_ticket)
invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
invocation_link.stop()
service_link.begin_stop()
service_link.end_stop()
self.assertIs(
service_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
self.assertIs(
invocation_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
self.assertIs(invocation_mate.tickets()[-1].code, test_code)
self.assertEqual(invocation_mate.tickets()[-1].message, test_message)
def _perform_scenario_test(self, scenario):
test_operation_id = object()
test_group, test_method = scenario.group_and_method()
test_code = beta_interfaces.StatusCode.OK
test_message = 'a scenario test message'
service_link = service.service_link(
{(test_group, test_method): scenario.deserialize_request},
{(test_group, test_method): scenario.serialize_response})
service_mate = test_utilities.RecordingLink()
service_link.join_link(service_mate)
port = service_link.add_port('[::]:0', None)
service_link.start()
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_link = invocation.invocation_link(
channel, 'localhost', None,
{(test_group, test_method): scenario.serialize_request},
{(test_group, test_method): scenario.deserialize_response})
invocation_mate = test_utilities.RecordingLink()
invocation_link.join_link(invocation_mate)
invocation_link.start()
invocation_ticket = links.Ticket(
test_operation_id, 0, test_group, test_method,
links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
None, None, None, None, None, None)
invocation_link.accept_ticket(invocation_ticket)
requests = scenario.requests()
for request_index, request in enumerate(requests):
request_ticket = links.Ticket(
test_operation_id, 1 + request_index, None, None, None, None, 1, None,
request, None, None, None, None, None)
invocation_link.accept_ticket(request_ticket)
service_mate.block_until_tickets_satisfy(
test_cases.at_least_n_payloads_received_predicate(1 + request_index))
response_ticket = links.Ticket(
service_mate.tickets()[0].operation_id, request_index, None, None,
None, None, 1, None, scenario.response_for_request(request), None,
None, None, None, None)
service_link.accept_ticket(response_ticket)
invocation_mate.block_until_tickets_satisfy(
test_cases.at_least_n_payloads_received_predicate(1 + request_index))
request_count = len(requests)
invocation_completion_ticket = links.Ticket(
test_operation_id, request_count + 1, None, None, None, None, None,
None, None, None, None, None, links.Ticket.Termination.COMPLETION,
None)
invocation_link.accept_ticket(invocation_completion_ticket)
service_mate.block_until_tickets_satisfy(test_cases.terminated)
service_completion_ticket = links.Ticket(
service_mate.tickets()[0].operation_id, request_count, None, None, None,
None, None, None, None, None, test_code, test_message,
links.Ticket.Termination.COMPLETION, None)
service_link.accept_ticket(service_completion_ticket)
invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
invocation_link.stop()
service_link.begin_stop()
service_link.end_stop()
observed_requests = tuple(
ticket.payload for ticket in service_mate.tickets()
if ticket.payload is not None)
observed_responses = tuple(
ticket.payload for ticket in invocation_mate.tickets()
if ticket.payload is not None)
self.assertTrue(scenario.verify_requests(observed_requests))
self.assertTrue(scenario.verify_responses(observed_responses))
def testEmptyScenario(self):
self._perform_scenario_test(_proto_scenarios.EmptyScenario())
def testBidirectionallyUnaryScenario(self):
self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
def testBidirectionallyStreamingScenario(self):
self._perform_scenario_test(
_proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
    ValueError: cannot encode objects that are not 2-tuples
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
    ValueError: cannot encode objects that are not 2-tuples
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
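# Illustrative usage (example values only): a simple quoted filename is
# unwrapped and returned without the surrounding quotes.
#   >>> unquote_header_value('"foo.txt"')
#   'foo.txt'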
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
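# Illustrative usage (example values only): the charset declared in a meta tag
# is picked up by the first regular expression above.
#   >>> get_encodings_from_content('<meta charset="utf-8"><p>hi</p>')
#   ['utf-8']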
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
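# Illustrative usage (example values only): an explicit charset wins, and
# text/* content without one falls back to ISO-8859-1.
#   >>> get_encoding_from_headers({'content-type': 'text/html; charset=UTF-8'})
#   'UTF-8'
#   >>> get_encoding_from_headers({'content-type': 'text/plain'})
#   'ISO-8859-1'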
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
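# Illustrative usage (example values only): the final slice may be shorter
# than slice_length.
#   >>> list(iter_slices('abcdef', 4))
#   ['abcd', 'ef']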
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
    2. each encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
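# Illustrative usage (example values only): '%7E' (the unreserved '~') is
# decoded, while '%2F' (the reserved '/') stays percent-encoded.
#   >>> unquote_unreserved('%7Efoo%2Fbar')
#   '~foo%2Fbar'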
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
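# Illustrative usage (example values only): illegal characters such as spaces
# are percent-encoded, while reserved characters like '?', '=' and '/' are
# left alone.
#   >>> requote_uri('http://example.com/a b?q=1 2')
#   'http://example.com/a%20b?q=1%202'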
def address_in_network(ip, net):
"""
    This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
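# Illustrative usage (example values only): a bare address without a mask is
# not treated as CIDR notation.
#   >>> is_valid_cidr('192.168.1.0/24')
#   True
#   >>> is_valid_cidr('192.168.1.0')
#   False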
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
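# Illustrative usage (example values only; assumes the process environment is
# otherwise free of conflicting proxy settings): a host matching a no_proxy
# suffix is bypassed.
#   >>> os.environ['no_proxy'] = '.example.com'
#   >>> should_bypass_proxies('http://www.example.com/index.html')
#   True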
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*'
})
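# Illustrative usage: the exact User-Agent value depends on the installed
# requests version and platform, so only the header names are shown here.
#   >>> sorted(default_headers().keys())
#   ['Accept', 'Accept-Encoding', 'User-Agent']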
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
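# Illustrative usage (example values only; dict key order may vary):
#   >>> parse_header_links('<http://example.com/page2>; rel="next"')
#   [{'url': 'http://example.com/page2', 'rel': 'next'}]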
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
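# Illustrative usage (example values only): plain ASCII JSON has no null
# bytes, while UTF-16-LE without a BOM has nulls in the 2nd and 4th positions.
#   >>> guess_json_utf(b'{"key": "value"}')
#   'utf-8'
#   >>> guess_json_utf('{"key": "value"}'.encode('utf-16-le'))
#   'utf-16-le'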
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument."""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
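# Illustrative usage (example values only): a scheme is added when missing and
# an existing scheme is preserved.
#   >>> prepend_scheme_if_needed('example.com/pics', 'http')
#   'http://example.com/pics'
#   >>> prepend_scheme_if_needed('https://example.com/pics', 'http')
#   'https://example.com/pics'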
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
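# Illustrative usage (example values only): credentials embedded in the URL
# are returned, and a URL without them yields empty strings.
#   >>> get_auth_from_url('http://user:[email protected]/path')
#   ('user', 'pass')
#   >>> get_auth_from_url('http://example.com/path')
#   ('', '')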
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
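# Illustrative usage (example values only): on Python 3 the native string type
# is unicode, so the bytes are decoded; on Python 2 the same call returns the
# byte string unchanged.
#   >>> to_native_string(b'abc')
#   'abc'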
|
|
from __future__ import absolute_import, unicode_literals
import unittest
import mock
from mopidy import backend, core
from mopidy.internal import deprecation
from mopidy.models import Image, Ref, SearchResult, Track
class BaseCoreLibraryTest(unittest.TestCase):
def setUp(self): # noqa: N802
dummy1_root = Ref.directory(uri='dummy1:directory', name='dummy1')
self.backend1 = mock.Mock()
self.backend1.uri_schemes.get.return_value = ['dummy1']
self.backend1.actor_ref.actor_class.__name__ = 'DummyBackend1'
self.library1 = mock.Mock(spec=backend.LibraryProvider)
self.library1.get_images.return_value.get.return_value = {}
self.library1.root_directory.get.return_value = dummy1_root
self.backend1.library = self.library1
self.backend1.has_playlists.return_value.get.return_value = False
dummy2_root = Ref.directory(uri='dummy2:directory', name='dummy2')
self.backend2 = mock.Mock()
self.backend2.uri_schemes.get.return_value = ['dummy2', 'du2']
self.backend2.actor_ref.actor_class.__name__ = 'DummyBackend2'
self.library2 = mock.Mock(spec=backend.LibraryProvider)
self.library2.get_images.return_value.get.return_value = {}
self.library2.root_directory.get.return_value = dummy2_root
self.backend2.library = self.library2
self.backend2.has_playlists.return_value.get.return_value = False
# A backend without the optional library provider
self.backend3 = mock.Mock()
self.backend3.uri_schemes.get.return_value = ['dummy3']
self.backend3.actor_ref.actor_class.__name__ = 'DummyBackend3'
self.backend3.has_library.return_value.get.return_value = False
self.backend3.has_library_browse.return_value.get.return_value = False
self.core = core.Core(mixer=None, backends=[
self.backend1, self.backend2, self.backend3])
# TODO: split by method
class CoreLibraryTest(BaseCoreLibraryTest):
def test_get_images_returns_empty_dict_for_no_uris(self):
self.assertEqual({}, self.core.library.get_images([]))
def test_get_images_returns_empty_result_for_unknown_uri(self):
result = self.core.library.get_images(['dummy4:track'])
self.assertEqual({'dummy4:track': tuple()}, result)
def test_get_images_returns_empty_result_for_library_less_uri(self):
result = self.core.library.get_images(['dummy3:track'])
self.assertEqual({'dummy3:track': tuple()}, result)
def test_get_images_maps_uri_to_backend(self):
self.core.library.get_images(['dummy1:track'])
self.library1.get_images.assert_called_once_with(['dummy1:track'])
self.library2.get_images.assert_not_called()
def test_get_images_maps_uri_to_backends(self):
self.core.library.get_images(['dummy1:track', 'dummy2:track'])
self.library1.get_images.assert_called_once_with(['dummy1:track'])
self.library2.get_images.assert_called_once_with(['dummy2:track'])
def test_get_images_returns_images(self):
self.library1.get_images.return_value.get.return_value = {
'dummy1:track': [Image(uri='uri')]}
result = self.core.library.get_images(['dummy1:track'])
self.assertEqual({'dummy1:track': (Image(uri='uri'),)}, result)
def test_get_images_merges_results(self):
self.library1.get_images.return_value.get.return_value = {
'dummy1:track': [Image(uri='uri1')]}
self.library2.get_images.return_value.get.return_value = {
'dummy2:track': [Image(uri='uri2')]}
result = self.core.library.get_images(
['dummy1:track', 'dummy2:track', 'dummy3:track', 'dummy4:track'])
expected = {'dummy1:track': (Image(uri='uri1'),),
'dummy2:track': (Image(uri='uri2'),),
'dummy3:track': tuple(), 'dummy4:track': tuple()}
self.assertEqual(expected, result)
def test_browse_root_returns_dir_ref_for_each_lib_with_root_dir_name(self):
result = self.core.library.browse(None)
self.assertEqual(result, [
Ref.directory(uri='dummy1:directory', name='dummy1'),
Ref.directory(uri='dummy2:directory', name='dummy2'),
])
self.assertFalse(self.library1.browse.called)
self.assertFalse(self.library2.browse.called)
self.assertFalse(self.backend3.library.browse.called)
def test_browse_empty_string_returns_nothing(self):
result = self.core.library.browse('')
self.assertEqual(result, [])
self.assertFalse(self.library1.browse.called)
self.assertFalse(self.library2.browse.called)
def test_browse_dummy1_selects_dummy1_backend(self):
self.library1.browse.return_value.get.return_value = [
Ref.directory(uri='dummy1:directory:/foo/bar', name='bar'),
Ref.track(uri='dummy1:track:/foo/baz.mp3', name='Baz'),
]
self.core.library.browse('dummy1:directory:/foo')
self.assertEqual(self.library1.browse.call_count, 1)
self.assertEqual(self.library2.browse.call_count, 0)
self.library1.browse.assert_called_with('dummy1:directory:/foo')
def test_browse_dummy2_selects_dummy2_backend(self):
self.library2.browse.return_value.get.return_value = [
Ref.directory(uri='dummy2:directory:/bar/baz', name='quux'),
Ref.track(uri='dummy2:track:/bar/foo.mp3', name='Baz'),
]
self.core.library.browse('dummy2:directory:/bar')
self.assertEqual(self.library1.browse.call_count, 0)
self.assertEqual(self.library2.browse.call_count, 1)
self.library2.browse.assert_called_with('dummy2:directory:/bar')
def test_browse_dummy3_returns_nothing(self):
result = self.core.library.browse('dummy3:test')
self.assertEqual(result, [])
self.assertEqual(self.library1.browse.call_count, 0)
self.assertEqual(self.library2.browse.call_count, 0)
def test_browse_dir_returns_subdirs_and_tracks(self):
self.library1.browse.return_value.get.return_value = [
Ref.directory(uri='dummy1:directory:/foo/bar', name='Bar'),
Ref.track(uri='dummy1:track:/foo/baz.mp3', name='Baz'),
]
result = self.core.library.browse('dummy1:directory:/foo')
self.assertEqual(result, [
Ref.directory(uri='dummy1:directory:/foo/bar', name='Bar'),
Ref.track(uri='dummy1:track:/foo/baz.mp3', name='Baz'),
])
def test_lookup_fails_with_uri_and_uris_set(self):
with self.assertRaises(ValueError):
self.core.library.lookup('dummy1:a', ['dummy2:a'])
def test_lookup_can_handle_uris(self):
track1 = Track(uri='dummy1:a', name='abc')
track2 = Track(uri='dummy2:a', name='def')
self.library1.lookup().get.return_value = [track1]
self.library2.lookup().get.return_value = [track2]
result = self.core.library.lookup(uris=['dummy1:a', 'dummy2:a'])
self.assertEqual(result, {'dummy2:a': [track2], 'dummy1:a': [track1]})
def test_lookup_uris_returns_empty_list_for_dummy3_track(self):
result = self.core.library.lookup(uris=['dummy3:a'])
self.assertEqual(result, {'dummy3:a': []})
self.assertFalse(self.library1.lookup.called)
self.assertFalse(self.library2.lookup.called)
def test_lookup_ignores_tracks_without_uri_set(self):
track1 = Track(uri='dummy1:a', name='abc')
track2 = Track()
self.library1.lookup().get.return_value = [track1, track2]
result = self.core.library.lookup(uris=['dummy1:a'])
self.assertEqual(result, {'dummy1:a': [track1]})
def test_refresh_with_uri_selects_dummy1_backend(self):
self.core.library.refresh('dummy1:a')
self.library1.refresh.assert_called_once_with('dummy1:a')
self.assertFalse(self.library2.refresh.called)
def test_refresh_with_uri_selects_dummy2_backend(self):
self.core.library.refresh('dummy2:a')
self.assertFalse(self.library1.refresh.called)
self.library2.refresh.assert_called_once_with('dummy2:a')
def test_refresh_with_uri_fails_silently_for_dummy3_uri(self):
self.core.library.refresh('dummy3:a')
self.assertFalse(self.library1.refresh.called)
self.assertFalse(self.library2.refresh.called)
def test_refresh_without_uri_calls_all_backends(self):
self.core.library.refresh()
self.library1.refresh.return_value.get.assert_called_once_with()
self.library2.refresh.return_value.get.assert_called_once_with()
def test_search_combines_results_from_all_backends(self):
track1 = Track(uri='dummy1:a')
track2 = Track(uri='dummy2:a')
result1 = SearchResult(tracks=[track1])
result2 = SearchResult(tracks=[track2])
self.library1.search.return_value.get.return_value = result1
self.library2.search.return_value.get.return_value = result2
result = self.core.library.search({'any': ['a']})
self.assertIn(result1, result)
self.assertIn(result2, result)
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=False)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=False)
def test_search_with_uris_selects_dummy1_backend(self):
self.core.library.search(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo', 'dummy3:'])
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo'], exact=False)
self.assertFalse(self.library2.search.called)
def test_search_with_uris_selects_both_backends(self):
self.core.library.search(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo', 'dummy2:'])
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo'], exact=False)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=['dummy2:'], exact=False)
def test_search_filters_out_none(self):
track1 = Track(uri='dummy1:a')
result1 = SearchResult(tracks=[track1])
self.library1.search.return_value.get.return_value = result1
self.library2.search.return_value.get.return_value = None
result = self.core.library.search({'any': ['a']})
self.assertIn(result1, result)
self.assertNotIn(None, result)
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=False)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=False)
def test_search_accepts_query_dict_instead_of_kwargs(self):
track1 = Track(uri='dummy1:a')
track2 = Track(uri='dummy2:a')
result1 = SearchResult(tracks=[track1])
result2 = SearchResult(tracks=[track2])
self.library1.search.return_value.get.return_value = result1
self.library2.search.return_value.get.return_value = result2
result = self.core.library.search({'any': ['a']})
self.assertIn(result1, result)
self.assertIn(result2, result)
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=False)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=False)
def test_search_normalises_bad_queries(self):
self.core.library.search({'any': 'foobar'})
self.library1.search.assert_called_once_with(
query={'any': ['foobar']}, uris=None, exact=False)
class DeprecatedFindExactCoreLibraryTest(BaseCoreLibraryTest):
def run(self, result=None):
with deprecation.ignore('core.library.find_exact'):
return super(DeprecatedFindExactCoreLibraryTest, self).run(result)
def test_find_exact_combines_results_from_all_backends(self):
track1 = Track(uri='dummy1:a')
track2 = Track(uri='dummy2:a')
result1 = SearchResult(tracks=[track1])
result2 = SearchResult(tracks=[track2])
self.library1.search.return_value.get.return_value = result1
self.library2.search.return_value.get.return_value = result2
result = self.core.library.find_exact({'any': ['a']})
self.assertIn(result1, result)
self.assertIn(result2, result)
self.library1.search.assert_called_once_with(
query=dict(any=['a']), uris=None, exact=True)
self.library2.search.assert_called_once_with(
query=dict(any=['a']), uris=None, exact=True)
def test_find_exact_with_uris_selects_dummy1_backend(self):
self.core.library.find_exact(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo', 'dummy3:'])
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo'], exact=True)
self.assertFalse(self.library2.search.called)
def test_find_exact_with_uris_selects_both_backends(self):
self.core.library.find_exact(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo', 'dummy2:'])
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=['dummy1:', 'dummy1:foo'], exact=True)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=['dummy2:'], exact=True)
def test_find_exact_filters_out_none(self):
track1 = Track(uri='dummy1:a')
result1 = SearchResult(tracks=[track1])
self.library1.search.return_value.get.return_value = result1
self.library2.search.return_value.get.return_value = None
result = self.core.library.find_exact({'any': ['a']})
self.assertIn(result1, result)
self.assertNotIn(None, result)
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=True)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=True)
def test_find_accepts_query_dict_instead_of_kwargs(self):
track1 = Track(uri='dummy1:a')
track2 = Track(uri='dummy2:a')
result1 = SearchResult(tracks=[track1])
result2 = SearchResult(tracks=[track2])
self.library1.search.return_value.get.return_value = result1
self.library2.search.return_value.get.return_value = result2
result = self.core.library.find_exact({'any': ['a']})
self.assertIn(result1, result)
self.assertIn(result2, result)
self.library1.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=True)
self.library2.search.assert_called_once_with(
query={'any': ['a']}, uris=None, exact=True)
def test_find_exact_normalises_bad_queries(self):
self.core.library.find_exact({'any': 'foobar'})
self.library1.search.assert_called_once_with(
query={'any': ['foobar']}, uris=None, exact=True)
class DeprecatedLookupCoreLibraryTest(BaseCoreLibraryTest):
def run(self, result=None):
with deprecation.ignore('core.library.lookup:uri_arg'):
return super(DeprecatedLookupCoreLibraryTest, self).run(result)
def test_lookup_selects_dummy1_backend(self):
self.library1.lookup.return_value.get.return_value = []
self.core.library.lookup('dummy1:a')
self.library1.lookup.assert_called_once_with('dummy1:a')
self.assertFalse(self.library2.lookup.called)
def test_lookup_selects_dummy2_backend(self):
self.library2.lookup.return_value.get.return_value = []
self.core.library.lookup('dummy2:a')
self.assertFalse(self.library1.lookup.called)
self.library2.lookup.assert_called_once_with('dummy2:a')
def test_lookup_uri_returns_empty_list_for_dummy3_track(self):
result = self.core.library.lookup('dummy3:a')
self.assertEqual(result, [])
self.assertFalse(self.library1.lookup.called)
self.assertFalse(self.library2.lookup.called)
class LegacyFindExactToSearchLibraryTest(unittest.TestCase):
def run(self, result=None):
with deprecation.ignore('core.library.find_exact'):
return super(LegacyFindExactToSearchLibraryTest, self).run(result)
def setUp(self): # noqa: N802
self.backend = mock.Mock()
self.backend.actor_ref.actor_class.__name__ = 'DummyBackend'
self.backend.uri_schemes.get.return_value = ['dummy']
self.backend.library = mock.Mock(spec=backend.LibraryProvider)
self.core = core.Core(mixer=None, backends=[self.backend])
def test_core_find_exact_calls_backend_search_with_exact(self):
self.core.library.find_exact(query={'any': ['a']})
self.backend.library.search.assert_called_once_with(
query=dict(any=['a']), uris=None, exact=True)
def test_core_find_exact_handles_legacy_backend(self):
self.backend.library.search.return_value.get.side_effect = TypeError
self.core.library.find_exact(query={'any': ['a']})
# We are just testing that this doesn't fail.
def test_core_search_call_backend_search_with_exact(self):
self.core.library.search(query={'any': ['a']})
self.backend.library.search.assert_called_once_with(
query=dict(any=['a']), uris=None, exact=False)
def test_core_search_with_exact_call_backend_search_with_exact(self):
self.core.library.search(query={'any': ['a']}, exact=True)
self.backend.library.search.assert_called_once_with(
query=dict(any=['a']), uris=None, exact=True)
def test_core_search_with_handles_legacy_backend(self):
self.backend.library.search.return_value.get.side_effect = TypeError
self.core.library.search(query={'any': ['a']}, exact=True)
# We are just testing that this doesn't fail.
class MockBackendCoreLibraryBase(unittest.TestCase):
def setUp(self): # noqa: N802
dummy_root = Ref.directory(uri='dummy:directory', name='dummy')
self.library = mock.Mock(spec=backend.LibraryProvider)
self.library.root_directory.get.return_value = dummy_root
self.backend = mock.Mock()
self.backend.actor_ref.actor_class.__name__ = 'DummyBackend'
self.backend.uri_schemes.get.return_value = ['dummy']
self.backend.library = self.library
self.core = core.Core(mixer=None, backends=[self.backend])
@mock.patch('mopidy.core.library.logger')
class BrowseBadBackendTest(MockBackendCoreLibraryBase):
def test_backend_raises_exception_for_root(self, logger):
# Might happen if root_directory is a property for some weird reason.
self.library.root_directory.get.side_effect = Exception
self.assertEqual([], self.core.library.browse(None))
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_returns_none_for_root(self, logger):
self.library.root_directory.get.return_value = None
self.assertEqual([], self.core.library.browse(None))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_wrong_type_for_root(self, logger):
self.library.root_directory.get.return_value = 123
self.assertEqual([], self.core.library.browse(None))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_raises_exception_for_browse(self, logger):
self.library.browse.return_value.get.side_effect = Exception
self.assertEqual([], self.core.library.browse('dummy:directory'))
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_returns_wrong_type_for_browse(self, logger):
self.library.browse.return_value.get.return_value = [123]
self.assertEqual([], self.core.library.browse('dummy:directory'))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
@mock.patch('mopidy.core.library.logger')
class GetDistinctBadBackendTest(MockBackendCoreLibraryBase):
def test_backend_raises_exception(self, logger):
self.library.get_distinct.return_value.get.side_effect = Exception
self.assertEqual(set(), self.core.library.get_distinct('artist'))
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_returns_none(self, logger):
self.library.get_distinct.return_value.get.return_value = None
self.assertEqual(set(), self.core.library.get_distinct('artist'))
self.assertFalse(logger.error.called)
def test_backend_returns_wrong_type(self, logger):
self.library.get_distinct.return_value.get.return_value = 'abc'
self.assertEqual(set(), self.core.library.get_distinct('artist'))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_iterable_containing_wrong_types(self, logger):
self.library.get_distinct.return_value.get.return_value = [1, 2, 3]
self.assertEqual(set(), self.core.library.get_distinct('artist'))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
@mock.patch('mopidy.core.library.logger')
class GetImagesBadBackendTest(MockBackendCoreLibraryBase):
def test_backend_raises_exception(self, logger):
uri = 'dummy:/1'
self.library.get_images.return_value.get.side_effect = Exception
self.assertEqual({uri: tuple()}, self.core.library.get_images([uri]))
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_returns_none(self, logger):
uri = 'dummy:/1'
self.library.get_images.return_value.get.return_value = None
self.assertEqual({uri: tuple()}, self.core.library.get_images([uri]))
self.assertFalse(logger.error.called)
def test_backend_returns_wrong_type(self, logger):
uri = 'dummy:/1'
self.library.get_images.return_value.get.return_value = 'abc'
self.assertEqual({uri: tuple()}, self.core.library.get_images([uri]))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_mapping_containing_wrong_types(self, logger):
uri = 'dummy:/1'
self.library.get_images.return_value.get.return_value = {uri: 'abc'}
self.assertEqual({uri: tuple()}, self.core.library.get_images([uri]))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_mapping_containing_none(self, logger):
uri = 'dummy:/1'
self.library.get_images.return_value.get.return_value = {uri: None}
self.assertEqual({uri: tuple()}, self.core.library.get_images([uri]))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_unknown_uri(self, logger):
uri = 'dummy:/1'
self.library.get_images.return_value.get.return_value = {'foo': []}
self.assertEqual({uri: tuple()}, self.core.library.get_images([uri]))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
@mock.patch('mopidy.core.library.logger')
class LookupByUrisBadBackendTest(MockBackendCoreLibraryBase):
def test_backend_raises_exception(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.side_effect = Exception
self.assertEqual({uri: []}, self.core.library.lookup(uris=[uri]))
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_returns_none(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.return_value = None
self.assertEqual({uri: []}, self.core.library.lookup(uris=[uri]))
self.assertFalse(logger.error.called)
def test_backend_returns_wrong_type(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.return_value = 'abc'
self.assertEqual({uri: []}, self.core.library.lookup(uris=[uri]))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_iterable_containing_wrong_types(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.return_value = [123]
self.assertEqual({uri: []}, self.core.library.lookup(uris=[uri]))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_none_with_uri(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.return_value = None
self.assertEqual([], self.core.library.lookup(uri))
self.assertFalse(logger.error.called)
def test_backend_returns_wrong_type_with_uri(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.return_value = 'abc'
self.assertEqual([], self.core.library.lookup(uri))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
def test_backend_returns_iterable_wrong_types_with_uri(self, logger):
uri = 'dummy:/1'
self.library.lookup.return_value.get.return_value = [123]
self.assertEqual([], self.core.library.lookup(uri))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
@mock.patch('mopidy.core.library.logger')
class RefreshBadBackendTest(MockBackendCoreLibraryBase):
def test_backend_raises_exception(self, logger):
self.library.refresh.return_value.get.side_effect = Exception
self.core.library.refresh()
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_raises_exception_with_uri(self, logger):
self.library.refresh.return_value.get.side_effect = Exception
self.core.library.refresh('dummy:/1')
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
@mock.patch('mopidy.core.library.logger')
class SearchBadBackendTest(MockBackendCoreLibraryBase):
def test_backend_raises_exception(self, logger):
self.library.search.return_value.get.side_effect = Exception
self.assertEqual([], self.core.library.search(query={'any': ['foo']}))
logger.exception.assert_called_with(mock.ANY, 'DummyBackend')
def test_backend_raises_lookuperror(self, logger):
        # TODO: is this behavior desired? Do we need to keep handling the
        # LookupError case specially?
self.library.search.return_value.get.side_effect = LookupError
with self.assertRaises(LookupError):
self.core.library.search(query={'any': ['foo']})
def test_backend_returns_none(self, logger):
self.library.search.return_value.get.return_value = None
self.assertEqual([], self.core.library.search(query={'any': ['foo']}))
self.assertFalse(logger.error.called)
def test_backend_returns_wrong_type(self, logger):
self.library.search.return_value.get.return_value = 'abc'
self.assertEqual([], self.core.library.search(query={'any': ['foo']}))
logger.error.assert_called_with(mock.ANY, 'DummyBackend', mock.ANY)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Subnet pool action implementations"""
import logging
from osc_lib.cli import format_columns
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import sdk_utils
from openstackclient.network.v2 import _tag
LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {
'default_prefix_length': 'default_prefixlen',
'is_shared': 'shared',
'maximum_prefix_length': 'max_prefixlen',
'minimum_prefix_length': 'min_prefixlen',
'tenant_id': 'project_id',
}
return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)
_formatters = {
'location': format_columns.DictColumn,
'prefixes': format_columns.ListColumn,
'tags': format_columns.ListColumn,
}
def _get_attrs(client_manager, parsed_args):
attrs = {}
network_client = client_manager.network
if parsed_args.name is not None:
attrs['name'] = parsed_args.name
if parsed_args.prefixes is not None:
attrs['prefixes'] = parsed_args.prefixes
if parsed_args.default_prefix_length is not None:
attrs['default_prefixlen'] = parsed_args.default_prefix_length
if parsed_args.min_prefix_length is not None:
attrs['min_prefixlen'] = parsed_args.min_prefix_length
if parsed_args.max_prefix_length is not None:
attrs['max_prefixlen'] = parsed_args.max_prefix_length
if parsed_args.address_scope is not None:
attrs['address_scope_id'] = network_client.find_address_scope(
parsed_args.address_scope, ignore_missing=False).id
if 'no_address_scope' in parsed_args and parsed_args.no_address_scope:
attrs['address_scope_id'] = None
if parsed_args.default:
attrs['is_default'] = True
if parsed_args.no_default:
attrs['is_default'] = False
if 'share' in parsed_args and parsed_args.share:
attrs['shared'] = True
if 'no_share' in parsed_args and parsed_args.no_share:
attrs['shared'] = False
# "subnet pool set" command doesn't support setting project.
if 'project' in parsed_args and parsed_args.project is not None:
identity_client = client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
if parsed_args.description is not None:
attrs['description'] = parsed_args.description
if parsed_args.default_quota is not None:
attrs['default_quota'] = int(parsed_args.default_quota)
return attrs
def _add_prefix_options(parser, for_create=False):
parser.add_argument(
'--pool-prefix',
metavar='<pool-prefix>',
dest='prefixes',
action='append',
required=for_create,
help=_("Set subnet pool prefixes (in CIDR notation) "
"(repeat option to set multiple prefixes)")
)
parser.add_argument(
'--default-prefix-length',
metavar='<default-prefix-length>',
type=int,
action=parseractions.NonNegativeAction,
help=_("Set subnet pool default prefix length")
)
parser.add_argument(
'--min-prefix-length',
metavar='<min-prefix-length>',
action=parseractions.NonNegativeAction,
type=int,
help=_("Set subnet pool minimum prefix length")
)
parser.add_argument(
'--max-prefix-length',
metavar='<max-prefix-length>',
type=int,
action=parseractions.NonNegativeAction,
help=_("Set subnet pool maximum prefix length")
)
def _add_default_options(parser):
default_group = parser.add_mutually_exclusive_group()
default_group.add_argument(
'--default',
action='store_true',
help=_("Set this as a default subnet pool"),
)
default_group.add_argument(
'--no-default',
action='store_true',
help=_("Set this as a non-default subnet pool"),
)
# TODO(rtheis): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class CreateSubnetPool(command.ShowOne):
_description = _("Create subnet pool")
def get_parser(self, prog_name):
parser = super(CreateSubnetPool, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<name>',
help=_("Name of the new subnet pool")
)
_add_prefix_options(parser, for_create=True)
parser.add_argument(
'--project',
metavar='<project>',
help=_("Owner's project (name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--address-scope',
metavar='<address-scope>',
help=_("Set address scope associated with the subnet pool "
"(name or ID), prefixes must be unique across address "
"scopes")
)
_add_default_options(parser)
shared_group = parser.add_mutually_exclusive_group()
shared_group.add_argument(
'--share',
action='store_true',
help=_("Set this subnet pool as shared"),
)
shared_group.add_argument(
'--no-share',
action='store_true',
help=_("Set this subnet pool as not shared"),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_("Set subnet pool description")
)
parser.add_argument(
'--default-quota',
type=int,
metavar='<num-ip-addresses>',
help=_("Set default per-project quota for this subnet pool "
"as the number of IP addresses that can be allocated "
"from the subnet pool")),
_tag.add_tag_option_to_parser_for_create(parser, _('subnet pool'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
attrs = _get_attrs(self.app.client_manager, parsed_args)
        # The Neutron server expects prefixes to be a list.
if "prefixes" not in attrs:
attrs['prefixes'] = []
obj = client.create_subnet_pool(**attrs)
# tags cannot be set when created, so tags need to be set later.
_tag.update_tags_for_set(client, obj, parsed_args)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
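# Illustrative CLI invocation handled by the command above (the pool name and
# prefix values are example placeholders only):
#   openstack subnet pool create --pool-prefix 192.168.0.0/16 \
#       --default-prefix-length 24 demo-pool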
class DeleteSubnetPool(command.Command):
_description = _("Delete subnet pool(s)")
def get_parser(self, prog_name):
parser = super(DeleteSubnetPool, self).get_parser(prog_name)
parser.add_argument(
'subnet_pool',
metavar='<subnet-pool>',
nargs='+',
help=_("Subnet pool(s) to delete (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
result = 0
for pool in parsed_args.subnet_pool:
try:
obj = client.find_subnet_pool(pool, ignore_missing=False)
client.delete_subnet_pool(obj)
except Exception as e:
result += 1
LOG.error(_("Failed to delete subnet pool with "
"name or ID '%(pool)s': %(e)s"),
{'pool': pool, 'e': e})
if result > 0:
total = len(parsed_args.subnet_pool)
msg = (_("%(result)s of %(total)s subnet pools failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
# TODO(rtheis): Use only the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class ListSubnetPool(command.Lister):
_description = _("List subnet pools")
def get_parser(self, prog_name):
parser = super(ListSubnetPool, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
shared_group = parser.add_mutually_exclusive_group()
shared_group.add_argument(
'--share',
action='store_true',
help=_("List subnet pools shared between projects"),
)
shared_group.add_argument(
'--no-share',
action='store_true',
help=_("List subnet pools not shared between projects"),
)
default_group = parser.add_mutually_exclusive_group()
default_group.add_argument(
'--default',
action='store_true',
help=_("List subnet pools used as the default external "
"subnet pool"),
)
default_group.add_argument(
'--no-default',
action='store_true',
help=_("List subnet pools not used as the default external "
"subnet pool")
)
parser.add_argument(
'--project',
metavar='<project>',
help=_("List subnet pools according to their project (name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--name',
metavar='<name>',
help=_("List only subnet pools of given name in output")
)
parser.add_argument(
'--address-scope',
metavar='<address-scope>',
help=_("List only subnet pools of given address scope "
"in output (name or ID)")
)
_tag.add_tag_filtering_option_to_parser(parser, _('subnet pools'))
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
network_client = self.app.client_manager.network
filters = {}
if parsed_args.share:
filters['shared'] = True
filters['is_shared'] = True
elif parsed_args.no_share:
filters['shared'] = False
filters['is_shared'] = False
if parsed_args.default:
filters['is_default'] = True
elif parsed_args.no_default:
filters['is_default'] = False
if parsed_args.project:
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
filters['tenant_id'] = project_id
filters['project_id'] = project_id
if parsed_args.name is not None:
filters['name'] = parsed_args.name
if parsed_args.address_scope:
address_scope = network_client.find_address_scope(
parsed_args.address_scope,
ignore_missing=False)
filters['address_scope_id'] = address_scope.id
_tag.get_tag_filtering_args(parsed_args, filters)
data = network_client.subnet_pools(**filters)
headers = ('ID', 'Name', 'Prefixes')
columns = ('id', 'name', 'prefixes')
if parsed_args.long:
headers += ('Default Prefix Length', 'Address Scope',
'Default Subnet Pool', 'Shared', 'Tags')
columns += ('default_prefix_length', 'address_scope_id',
'is_default', 'is_shared', 'tags')
return (headers,
(utils.get_item_properties(
s, columns,
formatters=_formatters,
) for s in data))
# TODO(rtheis): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class SetSubnetPool(command.Command):
_description = _("Set subnet pool properties")
def get_parser(self, prog_name):
parser = super(SetSubnetPool, self).get_parser(prog_name)
parser.add_argument(
'subnet_pool',
metavar='<subnet-pool>',
help=_("Subnet pool to modify (name or ID)")
)
parser.add_argument(
'--name',
metavar='<name>',
help=_("Set subnet pool name")
)
_add_prefix_options(parser)
address_scope_group = parser.add_mutually_exclusive_group()
address_scope_group.add_argument(
'--address-scope',
metavar='<address-scope>',
help=_("Set address scope associated with the subnet pool "
"(name or ID), prefixes must be unique across address "
"scopes")
)
address_scope_group.add_argument(
'--no-address-scope',
action='store_true',
help=_("Remove address scope associated with the subnet pool")
)
_add_default_options(parser)
parser.add_argument(
'--description',
metavar='<description>',
help=_("Set subnet pool description")
)
parser.add_argument(
'--default-quota',
type=int,
metavar='<num-ip-addresses>',
help=_("Set default per-project quota for this subnet pool "
"as the number of IP addresses that can be allocated "
"from the subnet pool")),
_tag.add_tag_option_to_parser_for_set(parser, _('subnet pool'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
obj = client.find_subnet_pool(parsed_args.subnet_pool,
ignore_missing=False)
attrs = _get_attrs(self.app.client_manager, parsed_args)
# Existing prefixes must be a subset of the new prefixes.
if 'prefixes' in attrs:
attrs['prefixes'].extend(obj.prefixes)
if attrs:
client.update_subnet_pool(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_set(client, obj, parsed_args)
class ShowSubnetPool(command.ShowOne):
_description = _("Display subnet pool details")
def get_parser(self, prog_name):
parser = super(ShowSubnetPool, self).get_parser(prog_name)
parser.add_argument(
'subnet_pool',
metavar='<subnet-pool>',
help=_("Subnet pool to display (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
obj = client.find_subnet_pool(
parsed_args.subnet_pool,
ignore_missing=False
)
display_columns, columns = _get_columns(obj)
data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
class UnsetSubnetPool(command.Command):
_description = _("Unset subnet pool properties")
def get_parser(self, prog_name):
parser = super(UnsetSubnetPool, self).get_parser(prog_name)
parser.add_argument(
'subnet_pool',
metavar="<subnet-pool>",
help=_("Subnet pool to modify (name or ID)")
)
_tag.add_tag_option_to_parser_for_unset(parser, _('subnet pool'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
obj = client.find_subnet_pool(
parsed_args.subnet_pool, ignore_missing=False)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_unset(client, obj, parsed_args)
|
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import os
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import symmetry_measure
json_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "json_test_files")
class CoordinationGeometryFinderTest(PymatgenTest):
def setUp(self):
self.lgf = LocalGeometryFinder()
self.lgf.setup_parameters(centering_type='standard',
structure_refinement=self.lgf.STRUCTURE_REFINEMENT_NONE)
# self.strategies = [SimplestChemenvStrategy(), SimpleAbundanceChemenvStrategy()]
def test_abstract_geometry(self):
cg_ts3 = self.lgf.allcg['TS:3']
cg_tet = self.lgf.allcg['T:4']
abstract_geom = AbstractGeometry.from_cg(cg=cg_ts3, centering_type='central_site')
self.assertArrayAlmostEqual(abstract_geom.centre, [0.0, 0.0, 0.0])
abstract_geom = AbstractGeometry.from_cg(cg=cg_ts3, centering_type='centroid')
self.assertArrayAlmostEqual(abstract_geom.centre, [0.0, 0.0, 0.33333333333])
with self.assertRaises(ValueError) as cm:
AbstractGeometry.from_cg(cg=cg_ts3, centering_type='central_site',
include_central_site_in_centroid=True)
self.assertEqual(str(cm.exception), 'The center is the central site, no calculation of the centroid, '
'variable include_central_site_in_centroid should be set to False')
abstract_geom = AbstractGeometry.from_cg(cg=cg_ts3, centering_type='centroid',
include_central_site_in_centroid=True)
self.assertArrayAlmostEqual(abstract_geom.centre, [0.0, 0.0, 0.25])
self.assertEqual(abstract_geom.__str__(),
'\nAbstract Geometry with 3 points :\n'
' [-1. 0. -0.25]\n'
' [ 1. 0. -0.25]\n'
' [0. 0. 0.75]\n'
'Points are referenced to the centroid (calculated with the central site) :\n'
' [0. 0. 0.25]\n')
symm_dict = symmetry_measure([[0.0, 0.0, 0.0]], [1.1, 2.2, 3.3])
self.assertAlmostEqual(symm_dict['symmetry_measure'], 0.0)
self.assertEqual(symm_dict['scaling_factor'], None)
self.assertEqual(symm_dict['rotation_matrix'], None)
tio2_struct = self.get_structure('TiO2')
envs = self.lgf.compute_coordination_environments(structure=tio2_struct, indices=[0])
self.assertAlmostEqual(envs[0][0]['csm'], 1.5309987846957258)
self.assertAlmostEqual(envs[0][0]['ce_fraction'], 1.0)
self.assertEqual(envs[0][0]['ce_symbol'], 'O:6')
self.assertEqual(sorted(envs[0][0]['permutation']), sorted([0, 4, 1, 5, 2, 3]))
self.lgf.setup_random_structure(coordination=5)
self.assertEqual(len(self.lgf.structure), 6)
self.lgf.setup_random_indices_local_geometry(coordination=5)
self.assertEqual(self.lgf.icentral_site, 0)
self.assertEqual(len(self.lgf.indices), 5)
self.lgf.setup_ordered_indices_local_geometry(coordination=5)
self.assertEqual(self.lgf.icentral_site, 0)
self.assertEqual(self.lgf.indices, list(range(1, 6)))
self.lgf.setup_explicit_indices_local_geometry(explicit_indices=[3, 5, 2, 0, 1, 4])
self.assertEqual(self.lgf.icentral_site, 0)
self.assertEqual(self.lgf.indices, [4, 6, 3, 1, 2, 5])
LiFePO4_struct = self.get_structure('LiFePO4')
isite = 10
envs_LiFePO4 = self.lgf.compute_coordination_environments(structure=LiFePO4_struct, indices=[isite])
self.assertAlmostEqual(envs_LiFePO4[isite][0]['csm'], 0.140355832317)
nbs_coords = [np.array([6.16700437, -4.55194317, -5.89031356]),
np.array([4.71588167, -4.54248093, -3.75553856]),
np.array([6.88012571, -5.79877503, -3.73177541]),
np.array([6.90041188, -3.32797839, -3.71812416])]
self.lgf.setup_structure(LiFePO4_struct)
self.lgf.setup_local_geometry(isite, coords=nbs_coords)
perfect_tet = AbstractGeometry.from_cg(cg=cg_tet,
centering_type='centroid',
include_central_site_in_centroid=False)
points_perfect_tet = perfect_tet.points_wcs_ctwcc()
res = self.lgf.coordination_geometry_symmetry_measures_fallback_random(coordination_geometry=cg_tet,
NRANDOM=5,
points_perfect=points_perfect_tet)
permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps = res
for perm_csm_dict in permutations_symmetry_measures:
self.assertAlmostEqual(perm_csm_dict['symmetry_measure'], 0.140355832317)
#
# def _strategy_test(self, strategy):
# files = []
# for (dirpath, dirnames, filenames) in os.walk(json_files_dir):
# files.extend(filenames)
# break
#
# for ifile, json_file in enumerate(files):
# with self.subTest(json_file=json_file):
# f = open("{}/{}".format(json_files_dir, json_file), 'r')
# dd = json.load(f)
# f.close()
#
# atom_indices = dd['atom_indices']
# expected_geoms = dd['expected_geoms']
#
# struct = Structure.from_dict(dd['structure'])
#
# struct = self.lgf.setup_structure(struct)
# se = self.lgf.compute_structure_environments_detailed_voronoi(only_indices=atom_indices,
# maximum_distance_factor=1.5)
#
# #All strategies should get the correct environment with their default parameters
# strategy.set_structure_environments(se)
# for ienv, isite in enumerate(atom_indices):
# ce = strategy.get_site_coordination_environment(struct[isite])
# try:
# coord_env = ce[0]
# except TypeError:
# coord_env = ce
# #Check that the environment found is the expected one
# self.assertEqual(coord_env, expected_geoms[ienv])
#
# def test_simplest_chemenv_strategy(self):
# strategy = SimplestChemenvStrategy()
# self._strategy_test(strategy)
#
# def test_simple_abundance_chemenv_strategy(self):
# strategy = SimpleAbundanceChemenvStrategy()
# self._strategy_test(strategy)
def test_perfect_environments(self):
allcg = AllCoordinationGeometries()
indices_CN = {1: [0],
2: [1, 0],
3: [1, 0, 2],
4: [2, 0, 3, 1],
5: [2, 3, 1, 0, 4],
6: [0, 2, 3, 1, 5, 4],
7: [2, 6, 0, 3, 4, 5, 1],
8: [1, 2, 6, 3, 7, 0, 4, 5],
9: [5, 2, 6, 0, 4, 7, 3, 8, 1],
10: [8, 5, 6, 3, 0, 7, 2, 4, 9, 1],
11: [7, 6, 4, 1, 2, 5, 0, 8, 9, 10, 3],
12: [5, 8, 9, 0, 3, 1, 4, 2, 6, 11, 10, 7],
13: [4, 11, 5, 12, 1, 2, 8, 3, 0, 6, 9, 7, 10],
}
for coordination in range(1, 14):
for mp_symbol in allcg.get_implemented_geometries(coordination=coordination,
returned='mp_symbol'):
cg = allcg.get_geometry_from_mp_symbol(mp_symbol=mp_symbol)
self.lgf.allcg = AllCoordinationGeometries(only_symbols=[mp_symbol])
self.lgf.setup_test_perfect_environment(mp_symbol, randomness=False,
indices=indices_CN[coordination],
random_translation='NONE', random_rotation='NONE',
random_scale='NONE')
se = self.lgf.compute_structure_environments(only_indices=[0],
maximum_distance_factor=1.01*cg.distfactor_max,
min_cn=cg.coordination_number,
max_cn=cg.coordination_number,
only_symbols=[mp_symbol]
)
self.assertAlmostEqual(se.get_csm(0, mp_symbol)['symmetry_measure'], 0.0, delta=1e-8,
msg='Failed to get perfect environment with mp_symbol {}'.format(mp_symbol))
def test_disable_hints(self):
allcg = AllCoordinationGeometries()
mp_symbol = 'SH:13'
mp_symbols = ['SH:13', 'HP:12']
cg = allcg.get_geometry_from_mp_symbol(mp_symbol=mp_symbol)
mypoints = cg.points
mypoints[-1] = [0.9*cc for cc in mypoints[-1]]
self.lgf.allcg = AllCoordinationGeometries(only_symbols=[mp_symbol])
self.lgf.setup_test_perfect_environment(mp_symbol, randomness=False,
indices=[4, 11, 5, 12, 1, 2, 8, 3, 0, 6, 9, 7, 10],
random_translation='NONE', random_rotation='NONE',
random_scale='NONE', points=mypoints)
se_nohints = self.lgf.compute_structure_environments(only_indices=[0],
maximum_distance_factor=1.02 * cg.distfactor_max,
min_cn=12,
max_cn=13,
only_symbols=mp_symbols,
get_from_hints=False
)
se_hints = self.lgf.compute_structure_environments(only_indices=[0],
maximum_distance_factor=1.02 * cg.distfactor_max,
min_cn=12,
max_cn=13,
only_symbols=mp_symbols,
get_from_hints=True
)
with self.assertRaises(KeyError):
abc = se_nohints.ce_list[0][12]
abc.minimum_geometries()
self.assertAlmostEqual(se_hints.ce_list[0][13][0], se_nohints.ce_list[0][13][0])
self.assertTrue(set(se_nohints.ce_list[0].keys()).issubset(set(se_hints.ce_list[0].keys())))
if __name__ == "__main__":
unittest.main()
|
|
'''Multimethods (generic functions)
'''
from __future__ import absolute_import
import sys
from compiler import parse as py_compiler_parse
from compiler.ast import Keyword as AstKeyword
from ..bases import CachingBase
from ..collections import OrderedDict
from ..fakeatypes import (as_optimized_type, type_name, compose_types_scorer,
no_score, best_score, worst_score)
__all__ = '''MultiMethodError only before after around
InconsistenCallSignature InvalidMethodArguments
NoSuchMethod
MultiMethod
defmethod defboth_wrapper
current_method
'''.split()
not_specified = object()
class MultiMethodError(Exception):
pass
def parse_sig(sig, gbls=None, lcls=None):
    '''Hideous hack to allow signatures to be defined as
    "int,int,c=str,d=object"
    i.e.
    parse_sig("int,int,c=str,d=object") ->
    ((int, int), [("c", str), ("d", object)])
    Properly handles the order of keywords.
'''
callstr = '_parse_sig_func_(%s)' % sig
co = compile(callstr, '<string>', 'eval')
if gbls is None:
gbls = dict()
    if lcls is None:
        lcls = gbls.copy()
    else:
        lcls = dict(lcls)
lcls['_parse_sig_func_'] = lambda *args, **kwds: (args,kwds)
args,kwds = eval(co, gbls, lcls)
ast = py_compiler_parse(callstr, mode='eval')
kwds_order = [arg.name for arg in ast.node.args
if isinstance(arg, AstKeyword)]
kwds = sorted(kwds.iteritems(),
key=lambda (name,_): kwds_order.index(name))
return args, kwds
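# Illustrative sketch of the behaviour documented above (the class name is
# hypothetical; type names in the signature string are resolved by eval in the
# supplied namespaces, so user-defined classes work as well):
#
#   class Node(object):
#       pass
#   parse_sig("Node, int, depth=int", gbls=globals())
#   # -> ((Node, int), [('depth', int)])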
class InvalidCallArguments(Exception):
def __init__(self, expected_sig, given_sig):
self.expected_sig = expected_sig
self.given_sig = given_sig
def __str__(self):
return 'bad call arguments %s for signature %s' % (self.given_sig, self.expected_sig)
class MethodSignature(CachingBase):
    __slots__ = ['nargs','kwds']
def _init_cached(self, nargs, kwds):
self.nargs = nargs
self.kwds = kwds
@classmethod
def get_key(cls, nargs, kwds):
return int(nargs), tuple(kwds)
@classmethod
def from_sig_string(cls, s):
callstr = 'func(%s)' % (s,)
ast = py_compiler_parse(callstr, mode='eval')
kwds = [arg.name for arg in ast.node.args
if isinstance(arg, AstKeyword)]
return cls(len(ast.node.args) - len(kwds), kwds)
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__,
self.nargs, self.kwds)
def __str__(self):
acc = ['arg%d' % i for i in xrange(self.nargs)]
        acc.extend('%s=' % (s,) for s in self.kwds)
return '(%s)' % ', '.join(acc)
@classmethod
def from_call(cls, args, kwds):
return cls(len(args), kwds)
def bad_call(self, args, kwds):
raise InvalidCallArguments(self, self.from_call(args, kwds))
def partition_call_vector(self, args, kwds):
if not self.kwds:
if len(args) != self.nargs or kwds:
self.bad_call(args, kwds)
return args
return self.partition_call_vector_kwds(self, args, kwds)
class no_kwd(object):
def __repr__(self):
return '<no-kwd>'
no_kwd = no_kwd()
@staticmethod
def partition_call_vector_kwds(self, args, kwds):
nargs = len(args)
if nargs < self.nargs or nargs > self.nargs + len(self.kwds):
self.bad_call(args, kwds)
if kwds and not (set(kwds) <= set(self.kwds)):
self.bad_call(args, kwds)
call_vector = list(args)
for kwd in self.kwds[nargs-self.nargs:]:
call_vector.append(kwds.get(kwd, self.no_kwd))
return call_vector
def as_types_of_call_vector(self, vec, keyers, default):
if not self.kwds:
return tuple([typer(arg) for typer,arg in zip(keyers, vec)])
return tuple([typer(arg) if arg is not self.no_kwd else default
for typer,arg in zip(keyers, vec)])
def perform_call(self, vec, func):
if not self.kwds:
return func(*vec)
return self.perform_call_kwds(self, vec, func)
@staticmethod
def perform_call_kwds(self, vec, func):
no_kwd = self.no_kwd
return func(*vec[:self.nargs],
**dict([(k,v) for k,v in zip(self.kwds, vec[self.nargs:])
if v is not no_kwd]))
def type_sig_from_call_vector(self, vec):
return TypeSignature(map(type, vec[:self.nargs]),
[(k,type(v)) for k,v in zip(self.kwds, vec[self.nargs:])
if v is not no_kwd])
def format_signature(args, kwds, format_value=repr):
if isinstance(kwds, dict):
kwds = kwds.iteritems()
acc = map(format_value, args)
acc.extend('%s=%s' % (name,format_value(tp))
for name,tp in kwds)
return '(%s)' % ', '.join(acc)
class CallSignature(object):
def __init__(self, args, kwds):
self.args = args
self.kwds = kwds
def __str__(self):
return format_signature(self.args, self.kwds)
@classmethod
def from_call(cls, *args, **kwds):
return cls(args, kwds)
class TypeSignature(CachingBase):
__slots__ = ['args','kwds','types']
def _init_cached(self, args, kwds):
self.args = args
self.kwds = kwds
self.types = self.args + tuple(t for k,t in self.kwds)
@classmethod
def get_key(cls, args, kwds):
return tuple(map(as_optimized_type, args)), tuple((k,as_optimized_type(t)) for k,t in kwds)
@classmethod
def from_sig_string(cls, s, *args, **kwds):
return cls(*parse_sig(s, *args, **kwds))
def __str__(self):
return format_signature(self.args, self.kwds, type_name)
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__,
self.args, self.kwds)
def calculate_method_signature(self):
if not self.kwds:
return MethodSignature(len(self.args), ())
return MethodSignature(len(self.args),
[k for k,v in self.kwds])
# # # # # # # # # # # # # # # # #
# method combination compilers #
# # # # # # # # # # # # # # # # #
class CombinationType(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
only,before,after,around = map(CombinationType, 'only before after around'.split())
combination_compilers = {}
def compile_only(mm, method, last_func):
return method.func
combination_compilers[only] = compile_only
def missing_inner(mm, method):
raise MultiMethodError("combination=%s %s with no inner method" %
(method.combination, mm.name))
def wrapper_fixup(mm):
def wrap(func):
func.func_name = mm.name
return func
return wrap
def compile_before(mm, method, last_func):
if last_func is None:
missing_inner(mm, method)
before = method.func
@wrapper_fixup(mm)
def wrap(*args, **kwds):
before(*args, **kwds)
return last_func(*args, **kwds)
return wrap
combination_compilers[before] = compile_before
def compile_after(mm, method, last_func):
if last_func is None:
missing_inner(mm, method)
after = method.func
@wrapper_fixup(mm)
def wrap(*args, **kwds):
op = last_func(*args, **kwds)
after(*args, **kwds)
return op
return wrap
combination_compilers[after] = compile_after
def compile_around(mm, method, last_func):
if last_func is None:
missing_inner(mm, method)
around = method.func
@wrapper_fixup(mm)
def wrap(*args, **kwds):
return around(last_func, *args, **kwds)
return wrap
combination_compilers[around] = compile_around
# # # # # # # # #
# multimethods #
# # # # # # # # #
class Method(object):
    '''One method with a definite signature and a corresponding function
    for a multimethod. Also contains a combination rule for
    combination with other methods.
    '''
__slots__ = ['type_sig','func','combination','scorers']
def __init__(self, type_sig, func, combination):
self.type_sig = type_sig
self.func = func
self.combination = combination
class InconsistenCallSignature(MultiMethodError):
def __init__(self, mm, invalid_sig):
self.mm = mm
self.invalid_sig = invalid_sig
def __str__(self):
return 'invalid method signature %s for %s' % (self.invalid_sig, self.mm)
class InvalidMethodArguments(MultiMethodError):
def __init__(self, mm, meth_sig):
self.mm = mm
self.meth_sig = meth_sig
def __str__(self):
return 'invalid arguments to %s; called with %s' % (self.mm, self.meth_sig)
class NoSuchMethod(MultiMethodError):
def __init__(self, mm, call_sig):
self.mm = mm
self.call_sig = call_sig
def __str__(self):
return 'no such method for %s called with %s' % (self.mm, self.call_sig)
def __repr__(self):
return str(self)
def score_call_vector(scorers, call_key):
acc = []
for scorer,key in zip(scorers, call_key):
if key is not_specified:
score = worst_score
else:
score = scorer(key)
if score is no_score:
return score
acc.append(score)
#print 'score',self.type_sig,score
return acc
class MultiMethod(object):
    '''Generic function dispatching on the types of its call arguments.
    Methods are registered via register_method (normally through defmethod)
    with a combination rule (only, before, after, around); applicable methods
    are selected and combined at call time, and results are cached when enabled.
    '''
def __init__(self, name='<multilambda>', doc='', signature=None,
default_combination=None, cache=True, inherit_from=()):
self.name = name
self.doc = doc
self.methods = []
if isinstance(signature, str):
signature = MethodSignature.from_sig_string(signature)
self.signature = signature
self.default_combination = default_combination
self.type_keyers = None
self.all_methods = None
self.scorers = {}
self.callcache = dict() if cache else None
for i_f in inherit_from:
self.inherit_from(i_f)
def __str__(self):
return '%s%s' % (self.name, self.signature if self.signature else '<unspecified>')
def __call__(self, *args, **kwds):
if not self.type_keyers:
self.build_type_keys()
self.get_signature()
try:
call_vector = self.signature.partition_call_vector(args, kwds)
except InvalidCallArguments,e:
raise InvalidMethodArguments(self, e.given_sig)
call_key = self.signature.as_types_of_call_vector(call_vector, self.type_keyers, not_specified)
if self.callcache is None:
func = self.calculate_method(call_key)
else:
try:
func = self.callcache[call_key]
except KeyError:
func = self.callcache[call_key] = self.calculate_method(call_key)
except TypeError:
#unhashable key, go direct
func = self.calculate_method(call_key)
return self.signature.perform_call(call_vector, func)
inherts_from_port = None
inherts_to_port = None
def inherit_from(self, parent):
if self.inherts_from_port is None:
from jamenson.runtime.ports import PortList, connect
self.inherts_from_port = PortList(self)
parent.inherts_to(self)
def inherts_to(self, child):
from jamenson.runtime.ports import PortList, connect
if self.inherts_to_port is None:
self.inherts_to_port = PortList(self)
connect(self.inherts_to_port, child.inherts_from_port)
def register_method(self, typesig, func, combination):
methsig = typesig.calculate_method_signature()
if not self.signature:
self.signature = methsig
elif methsig is not self.signature:
raise InconsistenCallSignature(self, methsig)
self.methods.append(Method(typesig, func, combination))
self.invalidate()
def invalidate(self):
if self.callcache:
self.callcache.clear()
self.type_keyers = None
self.all_methods = None
if self.inherts_to_port:
from jamenson.runtime.ports import get_cells
for child in get_cells(self.inherts_to_port):
child.invalidate()
def build_type_keys(self):
self.all_methods = self.get_all_methods()
if not self.all_methods:
raise MultiMethodError("no methods defined for %s" % (self.name))
argument_types = zip(*[meth.type_sig.types for meth in self.all_methods])
keyers_and_scorers = map(compose_types_scorer, argument_types)
self.type_keyers,scorers = zip(*keyers_and_scorers)
meths_scorers = zip(*scorers)
for method,meth_scorers in zip(self.all_methods, meths_scorers):
self.scorers[method] = meth_scorers
def get_signature(self):
if self.signature is None:
if self.inherts_from_port is None:
raise RuntimeError("no methods defined")
from jamenson.runtime.ports import get_cells
for parent in get_cells(self.inherts_from_port):
if self.signature is None:
self.signature = parent.get_signature()
elif self.signature is not parent.signature:
raise InconsistenCallSignature(self, parent.signature)
return self.signature
def get_all_methods(self):
if self.all_methods is not None:
return self.all_methods
meths = self.methods
if self.inherts_from_port is not None:
from jamenson.runtime.ports import get_cells
for parent in get_cells(self.inherts_from_port):
meths = parent.get_all_methods() + meths
self.all_methods = meths
return meths
def calculate_method(self, call_key):
applicable_methods = []
for meth in self.all_methods:
score = score_call_vector(self.scorers[meth], call_key)
if score is not no_score:
applicable_methods.append([meth,score])
if not applicable_methods:
return self.no_applicable_methods_call(call_key)
applicable_methods.reverse()
        applicable_methods.sort(key=lambda (meth,score): score)  # relies on Python's stable sort
return self.build_method_combination(meth for meth,score in applicable_methods)
def no_applicable_methods_call(self, call_key):
call_sig = self.signature.perform_call(call_key, CallSignature.from_call)
error = NoSuchMethod(self, call_sig)
def wrapper(*args, **kwds):
raise error
return wrapper
def build_method_combination(self, methods):
last_func = None
for method in reversed(list(methods)):
try:
compiler = combination_compilers[method.combination]
except KeyError:
raise RuntimeError("unhandled combination %s" % method.combination)
else:
last_func = compiler(self, method, last_func)
#last_func.func_name = self.name
#last_func.func_doc = self.doc
return last_func
def defmethod(mm, sig, combination=None, ns=None):
if combination is None:
combination = mm.default_combination
if combination is None:
combination = only
def wrapper(func):
xsig = sig
if isinstance(xsig, str):
f = sys._getframe(1)
xsig = TypeSignature.from_sig_string(sig, f.f_globals if ns is None else ns, f.f_locals)
elif isinstance(xsig, (list,tuple)):
xsig = TypeSignature(xsig, {})
assert isinstance(xsig, TypeSignature), 'bad typesig %s' % (xsig,)
mm.register_method(xsig, func, combination)
return func
return wrapper
def defboth_wrapper(mm, sig, stack_depth=1, combination=None):
    '''Many two-element method definitions are agnostic to the order of the
    arguments; only the type semantics matter. This defines both orderings
    of the two argument types (when they differ) to handle such cases
    easily.
'''
if isinstance(sig, str):
f = sys._getframe(stack_depth)
args,kwds = parse_sig(sig, f.f_globals, f.f_locals)
assert not kwds
else:
args = sig
kwds = {}
assert len(args)==2
if combination is None:
combination = mm.default_combination
if combination is None:
combination = only
def wrapper(func):
mm.register_method(TypeSignature(args, kwds), func, combination)
if args[0] != args[1]:
mm.register_method(TypeSignature(args[::-1], kwds), lambda a,b: func(b,a), combination)
return func
return wrapper
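# Minimal usage sketch (the multimethod and function names are hypothetical):
#
#   combine = MultiMethod('combine')
#
#   @defboth_wrapper(combine, "int, float")
#   def combine_int_float(an_int, a_float):
#       return float(an_int) + a_float
#
# Because the two types differ, this registers both the (int, float) signature
# and a swapped-argument (float, int) variant delegating to the same function.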
def current_method():
f = sys._getframe()
while f:
if f.f_code.co_name == '__call__' and 'self' in f.f_locals:
self = f.f_locals['self']
if isinstance(self, MultiMethod):
return self
f = f.f_back
raise MultiMethodError("MultiMethod instance not found in call stack")
|
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Helpers for AST analysis"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import ast
import sys
import itertools
from collections import namedtuple
from future.utils import viewvalues
CallDependency = namedtuple("Call", "line col")
ReturnDependency = namedtuple("Return", "line col")
class Variable(object): # pylint: disable=too-few-public-methods
"""Represent a variable name"""
def __init__(self, name, typ):
self.name = name
self.type = typ
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if isinstance(other, str):
return self.name == other
elif isinstance(other, Variable):
return self.name == other.name
return False
def __repr__(self):
return "Var({}, {})".format(self.name, self.type)
class Dependency(object): # pylint: disable=too-few-public-methods
"""Represent a variable dependency"""
def __init__(self, dependency, typ):
self.dependency = dependency
self.type = typ
def __hash__(self):
return hash(self.dependency)
def __eq__(self, other):
if not isinstance(other, Dependency):
return False
return self.dependency == other.dependency and self.type == other.type
def __repr__(self):
return "Dependency({}, {})".format(self.dependency, self.type)
def variable(name, typ):
"""Create Variable or Call Dependency"""
if isinstance(name, str):
return Variable(name, typ)
if isinstance(name, (Variable, ReturnDependency, CallDependency)):
return name
if typ == "return":
return ReturnDependency(*name)
if typ in ("call", "print", "import", "import from"):
return CallDependency(*name)
return Variable(name, typ)
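# A few illustrative calls (values are hypothetical), following the branches above:
#
#   variable("x", "normal")       # -> Var(x, normal)
#   variable((10, 4), "call")     # -> Call(line=10, col=4)
#   variable((10, 4), "return")   # -> Return(line=10, col=4)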
class NamedContext(object):
"""Store variable visibility context"""
def __init__(self):
self._names = [set()]
self.use = False
def flat(self):
"""Return available variable in the current context"""
result = set()
for name in self._names:
result = result.union(name)
return result
def enable(self):
"""Enable variable collection"""
self.use = True
self._names.append(set())
def disable(self):
"""Disable variable collection"""
self.use = False
def pop(self):
"""Remove sub-context from stack"""
self._names.pop()
def add(self, name):
"""Add variable to context"""
if self.use:
self._names[-1].add(name)
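# Minimal usage sketch of the context stack above (names are illustrative):
#
#   ctx = NamedContext()
#   ctx.enable()      # start collecting names into a new sub-context
#   ctx.add("x")
#   ctx.flat()        # -> {"x"}  (union of all sub-contexts)
#   ctx.pop()         # discard the innermost sub-context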
class DefinitionObject(object):
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.info())
def _dependencies(self, node, visitor_class, func): # pylint: disable=no-self-use
"""Extract name dependencies from node"""
visitor = visitor_class()
visitor.visit(node)
return [func(x if isinstance(x, FunctionCall) else x[0])
for x in visitor.names]
class Loop(DefinitionObject):
"""Loop class. Used for For and While nodes"""
def __init__(self, node, typ):
self.node = node
self.first_line = node.first_line
self.last_line = node.last_line
self.type = typ
self.iterable = []
self.iter_var = []
self.maybe_call = None
def info(self):
"""Return call information"""
result = ("first_line={}, last_line={}")
return result.format(self.first_line, self.last_line)
def add_iterable(self, node, visitor_class):
"""Extract dependencies from iterable"""
self.iterable = self._dependencies(
node, visitor_class, lambda x: Dependency(x, "direct"))
def add_iter_var(self, node, visitor_class):
"""Extract dependencies from iterable"""
self.iter_var = self._dependencies(
node, visitor_class, lambda x: Variable(x, "normal"))
class Condition(DefinitionObject):
"""Loop class. Used for If and While nodes"""
def __init__(self, node):
self.node = node
self.first_line = node.first_line
self.last_line = node.last_line
self.test_var = []
self.has_return = False
def info(self):
"""Return call information"""
result = ("first_line={}, last_line={}")
return result.format(self.first_line, self.last_line)
def add_test(self, node, visitor_class):
"""Extract dependencies from iterable"""
self.test_var += self._dependencies(
node, visitor_class, lambda x: Dependency(x, "conditional"))
class FunctionCall(ast.NodeVisitor): # pylint: disable=too-many-instance-attributes
"""Represent a function call"""
def __init__(self, visitor_class):
self.self_attr = []
self.func = []
self.args = []
self.keywords = {}
self.starargs = []
self.kwargs = []
self.result = None
self.line = -1
self.col = -1
self.visitor_class = visitor_class
self.name = ""
self.prefix = "call"
def all_args(self):
"""List arguments of function call"""
return [
Dependency(x, "parameter")
for x in itertools.chain(
self.self_attr,
itertools.chain.from_iterable(self.args),
self.starargs,
self.kwargs,
itertools.chain.from_iterable(viewvalues(self.keywords))
)
]
def use_visitor(self, node):
"""Use configured visitor to visit sub node"""
visitor = self.visitor_class()
visitor.visit(node)
return [x if isinstance(x, FunctionCall) else x[0]
for x in visitor.names]
def visit_Call(self, node): # pylint: disable=invalid-name
"""Visit Call"""
self.func = self.use_visitor(node.func)
if isinstance(node.func, ast.Attribute):
self.self_attr = self.use_visitor(node.func.value)
self.args = []
for arg in node.args:
            if sys.version_info < (3, 5) or not isinstance(arg, ast.Starred):
self.args.append(self.use_visitor(arg))
else:
self.visit(arg)
for keyword in node.keywords:
self.visit(keyword)
if hasattr(node, "starargs"):
# Python <= 3.4
if node.starargs:
self.starargs = self.use_visitor(node.starargs)
if node.kwargs:
self.kwargs = self.use_visitor(node.kwargs)
def visit_Starred(self, node): # pylint: disable=invalid-name
"""Visit Starred. Only valid in Call context after Python 3.5"""
# Python 3.5
self.starargs += self.use_visitor(node)
def visit_keyword(self, node):
"""Visit keyword"""
if node.arg:
self.keywords[node.arg] = self.use_visitor(node.value)
else:
# Python 3.5
self.kwargs += self.use_visitor(node.value)
def info(self):
"""Return call information"""
result = ("line={}, col={}, "
"func={}, args={}, keywords={}, *args={}, **kwargs={}")
return result.format(self.line, self.col, self.func, self.args,
self.keywords, self.starargs, self.kwargs)
def __repr__(self):
return "F({})".format(self.info())
class ClassDef(FunctionCall):
"""Represent a class definition"""
def __repr__(self):
return "Class(line={}, col={})".format(self.line, self.col)
class Decorator(FunctionCall):
"""Represent a decorator"""
def __init__(self, *args, **kwargs):
super(Decorator, self).__init__(*args, **kwargs)
self.is_fn = True
def __repr__(self):
return "Decorator({})".format(self.info())
def visit_Name(self, node): # pylint: disable=invalid-name
"""Visit Name"""
self.func = self.use_visitor(node)
self.is_fn = False
def info(self):
"""Return decorator information"""
if self.is_fn:
return super(Decorator, self).info()
return "line={}, col={}, name={}".format(
self.line, self.col, self.func)
class Generator(FunctionCall):
"""Represent a generator"""
def __init__(self, *args, **kwargs):
self.type = args[-1]
args = args[:-1]
super(Generator, self).__init__(*args, **kwargs)
def __repr__(self):
return "Generator({})".format(self.info())
def info(self):
"""Return generator information"""
return "line={}, col={}, type={}".format(
self.line, self.col, self.type)
class GeneratorCall(Generator):
"""Represent a generator call
CALL_FUNCTION for set and dict comprehension on Python 2 and Python 3
CALL_FUNCTION for list comprehension on Python 3
"""
def __repr__(self):
return "GeneratorCall({})".format(self.info())
class Assert(FunctionCall):
"""Represent an assert"""
def __init__(self, *args, **kwargs):
self.msg = args[-1]
args = args[:-1]
super(Assert, self).__init__(*args, **kwargs)
def __repr__(self):
return "Assert({})".format(self.info())
def info(self):
"""Return assert information"""
return "line={}, col={}, msg={}".format(
self.line, self.col, self.msg)
class Print(FunctionCall):
"""Represent a print statement"""
def __init__(self, *args, **kwargs):
super(Print, self).__init__(*args, **kwargs)
def __repr__(self):
return "Print({})".format(self.info())
class With(FunctionCall):
"""Represent a with"""
def __repr__(self):
return "With({})".format(self.info())
def info(self):
"""Return with information"""
return "line={}, col={}".format(
self.line, self.col)
class Import(FunctionCall):
"""Represent an import statement"""
def __init__(self, *args, **kwargs):
super(Import, self).__init__(*args, **kwargs)
self.prefix = "import"
def __repr__(self):
return "Import(line={})".format(self.line)
class ForIter(FunctionCall):
"""Represent a for iter"""
def __init__(self, *args, **kwargs):
super(ForIter, self).__init__(*args, **kwargs)
self.prefix = "iterator"
def __repr__(self):
return "ForIter({})".format(self.info())
def info(self):
"""Return ForIter information"""
return "line={}, col={}".format(
self.line, self.col)
def index(lis, alternatives):
"""Return index of one of the <alternatives> in <lis>"""
for alt in alternatives:
try:
return lis.index(alt)
except ValueError:
pass
return None
def safeget(container, ind):
"""Try to access element in container. If it fails, prints container"""
try:
return container[ind]
except IndexError as err:
if not err.args:
err.args = ("",)
import pprint
err.args = (err.args[0] + "\n Get\n Index {}\n Container \n{}".format(
ind, pprint.pformat(list(enumerate(container)))),) + err.args[1:]
raise err
|
|
#!/usr/bin/env python
# Copyright (c) 2008 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
import sys
from PyQt4.QtCore import (QFile, QFileInfo, QSettings,
QString, QStringList, QTimer, QVariant, SIGNAL)
from PyQt4.QtGui import (QAction, QApplication, QFileDialog, QIcon,
QKeySequence, QMainWindow, QMessageBox, QTextEdit, QTabWidget,
QShortcut)
import textedit
import qrc_resources
__version__ = "1.0.0"
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.tabs = QTabWidget()
self.setCentralWidget(self.tabs)
fileNewAction = self.createAction("&New", self.fileNew,
QKeySequence.New, "filenew", "Create a text file")
fileOpenAction = self.createAction("&Open...", self.fileOpen,
QKeySequence.Open, "fileopen",
"Open an existing text file")
fileSaveAction = self.createAction("&Save", self.fileSave,
QKeySequence.Save, "filesave", "Save the text")
fileSaveAsAction = self.createAction("Save &As...",
self.fileSaveAs, icon="filesaveas",
tip="Save the text using a new filename")
fileSaveAllAction = self.createAction("Save A&ll",
self.fileSaveAll, "filesave",
tip="Save all the files")
fileCloseAction = self.createAction("&Close", self.closeTab,
"Ctrl+W")
fileQuitAction = self.createAction("&Quit", self.close,
"Ctrl+Q", "filequit", "Close the application")
editCopyAction = self.createAction("&Copy", self.editCopy,
QKeySequence.Copy, "editcopy",
"Copy text to the clipboard")
editCutAction = self.createAction("Cu&t", self.editCut,
QKeySequence.Cut, "editcut",
"Cut text to the clipboard")
editPasteAction = self.createAction("&Paste", self.editPaste,
QKeySequence.Paste, "editpaste",
"Paste in the clipboard's text")
fileMenu = self.menuBar().addMenu("&File")
self.addActions(fileMenu, (fileNewAction, fileOpenAction,
fileSaveAction, fileSaveAsAction, fileSaveAllAction,
None, fileCloseAction, fileQuitAction))
editMenu = self.menuBar().addMenu("&Edit")
self.addActions(editMenu, (editCopyAction, editCutAction,
editPasteAction))
fileToolbar = self.addToolBar("File")
fileToolbar.setObjectName("FileToolbar")
self.addActions(fileToolbar, (fileNewAction, fileOpenAction,
fileSaveAction))
editToolbar = self.addToolBar("Edit")
editToolbar.setObjectName("EditToolbar")
self.addActions(editToolbar, (editCopyAction, editCutAction,
editPasteAction))
QShortcut(QKeySequence.PreviousChild, self, self.previousTab)
QShortcut(QKeySequence.NextChild, self, self.nextTab)
settings = QSettings()
self.restoreGeometry(
settings.value("MainWindow/Geometry").toByteArray())
self.restoreState(
settings.value("MainWindow/State").toByteArray())
status = self.statusBar()
status.setSizeGripEnabled(False)
status.showMessage("Ready", 5000)
self.setWindowTitle("Text Editor")
QTimer.singleShot(0, self.loadFiles)
def createAction(self, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/{0}.png".format(icon)))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
@staticmethod
def addActions(target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def closeEvent(self, event):
failures = []
for i in range(self.tabs.count()):
textEdit = self.tabs.widget(i)
if textEdit.isModified():
try:
textEdit.save()
except IOError, e:
failures.append(unicode(e))
if (failures and
QMessageBox.warning(self, "Text Editor -- Save Error",
"Failed to save{0}\nQuit anyway?".format(
"\n\t".join(failures)),
QMessageBox.Yes|QMessageBox.No) ==
QMessageBox.No):
event.ignore()
return
settings = QSettings()
settings.setValue("MainWindow/Geometry",
QVariant(self.saveGeometry()))
settings.setValue("MainWindow/State",
QVariant(self.saveState()))
files = QStringList()
while self.tabs.count() > 0:
textEdit = self.tabs.widget(0)
self.tabs.removeTab(0)
if not textEdit.filename.startsWith("Unnamed"):
files.append(textEdit.filename)
textEdit.close()
settings.setValue("CurrentFiles", QVariant(files))
def loadFiles(self):
if len(sys.argv) > 1:
for filename in sys.argv[1:11]:
filename = QString(filename)
if QFileInfo(filename).isFile():
self.loadFile(filename)
QApplication.processEvents()
else:
settings = QSettings()
files = settings.value("CurrentFiles").toStringList()
for filename in files:
filename = QString(filename)
if QFile.exists(filename):
self.loadFile(filename)
QApplication.processEvents()
def nextTab(self):
index = self.tabs.currentIndex() + 1
if index >= self.tabs.count():
index = 0
self.tabs.setCurrentIndex(index)
def previousTab(self):
index = self.tabs.currentIndex() - 1
        if index < 0:
            index = self.tabs.count() - 1
self.tabs.setCurrentIndex(index)
def fileNew(self):
textEdit = textedit.TextEdit()
self.tabs.addTab(textEdit, textEdit.title)
self.tabs.setCurrentIndex(self.tabs.count() - 1)
def fileOpen(self):
filename = QFileDialog.getOpenFileName(self,
"Text Editor -- Open File")
if not filename.isEmpty():
for i in range(self.tabs.count()):
textEdit = self.tabs.widget(i)
if textEdit.filename == filename:
self.tabs.setCurrentIndex(i)
break
else:
self.loadFile(filename)
def loadFile(self, filename):
textEdit = textedit.TextEdit(filename)
try:
textEdit.load()
except (IOError, OSError), e:
QMessageBox.warning(self, "Text Editor -- Load Error",
"Failed to load {0}: {1}".format(filename, e))
textEdit.close()
del textEdit
else:
self.tabs.addTab(textEdit, textEdit.title)
self.tabs.setCurrentIndex(self.tabs.count() - 1)
def fileSave(self):
textEdit = self.tabs.widget(self.tabs.currentIndex())
if textEdit is None or not isinstance(textEdit, QTextEdit):
return True
try:
textEdit.save()
return True
except (IOError, OSError), e:
QMessageBox.warning(self, "Text Editor -- Save Error",
"Failed to save {0}: {1}".format(textEdit.filename, e))
return False
def fileSaveAs(self):
textEdit = self.tabs.widget(self.tabs.currentIndex())
if textEdit is None or not isinstance(textEdit, QTextEdit):
return
filename = QFileDialog.getSaveFileName(self,
"Text Editor -- Save File As",
textEdit.filename, "Text files (*.txt *.*)")
if not filename.isEmpty():
textEdit.filename = filename
return self.fileSave()
return True
def fileSaveAll(self):
errors = []
for i in range(self.tabs.count()):
textEdit = self.tabs.widget(i)
if textEdit.isModified():
try:
textEdit.save()
except (IOError, OSError), e:
errors.append("{0}: {1}".format(textEdit.filename, e))
if errors:
QMessageBox.warning(self,
"Text Editor -- Save All Error",
"Failed to save\n{0}".format("\n".join(errors)))
def closeTab(self):
index = self.tabs.currentIndex()
if index != -1:
tab = self.tabs.widget(index)
self.tabs.removeTab(index)
tab.close()
def editCopy(self):
textEdit = self.tabs.widget(self.tabs.currentIndex())
if textEdit is None or not isinstance(textEdit, QTextEdit):
return
cursor = textEdit.textCursor()
text = cursor.selectedText()
if not text.isEmpty():
clipboard = QApplication.clipboard()
clipboard.setText(text)
def editCut(self):
textEdit = self.tabs.widget(self.tabs.currentIndex())
if textEdit is None or not isinstance(textEdit, QTextEdit):
return
cursor = textEdit.textCursor()
text = cursor.selectedText()
if not text.isEmpty():
cursor.removeSelectedText()
clipboard = QApplication.clipboard()
clipboard.setText(text)
def editPaste(self):
textEdit = self.tabs.widget(self.tabs.currentIndex())
if textEdit is None or not isinstance(textEdit, QTextEdit):
return
clipboard = QApplication.clipboard()
textEdit.insertPlainText(clipboard.text())
app = QApplication(sys.argv)
app.setWindowIcon(QIcon(":/icon.png"))
app.setOrganizationName("Qtrac Ltd.")
app.setOrganizationDomain("qtrac.eu")
app.setApplicationName("Text Editor")
form = MainWindow()
form.show()
app.exec_()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
# * Convert to vectors?
"""
=============
XY Pad Widget
=============
An XY pad widget with a draggable, bouncing puck. Pick up data on the
"outbox" outbox to receive the position of the puck and messages indicating
when it has touched one of the sides.
Example Usage
-------------
Create an XY pad which redraws 60 times per second:
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
clock = Clock(float(1)/60).activate()
xyPad = XYPad().activate()
clock.link((clock, "outbox"), (xyPad, "newframe"))
How Does it Work?
-----------------
The component requests a display surface from the Pygame Display service
component. This is used as the surface of the XY pad. It binds listeners for
mouse click and motion to the service.
The component works in one of two different modes, bouncing and non-bouncing.
This is specified upon initialization by the bouncingPuck argument.
In the bouncing mode the puck will continue to move once it has been set into
motion by a mouse drag. If the mouse button remains down for longer than 0.1
seconds it is deemed to be a drag. In the bouncing mode the component sends a
(message, 1) tuple to the "outbox" outbox each time the puck collides with one
of the sides. The messages can be changed using the collisionMsg argument.
They default to "top", "right", "bottom", "left".
In the non-bouncing mode the puck remains stationary after it has been dragged.
Both modes send a (positionMsg, (x, y)) tuple to the "outbox" outbox if the
puck moves.
If the editable argument to the constructor is set to be false the pad will not
respond to mouse presses.
As well as being controlled by the mouse an XY pad can be controlled externally,
for example by a second XY pad. Position and velocity messages received on the
"remoteChanges" inbox are used to change the motion of the puck. Position
messages are of the form ("Position", (xPos, yPos)), and velocity messages are
of the form ("Velocity", (xVel, yVel)).
In order to allow communication between two XY pads the component outputs
position and velocity messages to the "localChanges" outbox. By connecting the
"localChanges" outbox of one XY pad to the "remoteChanges" inbox of another,
the second pad can duplicate the motion of the first.
The XY pad only redraws the surface and updates the puck position when it
receives a message on its "newframe" inbox. Note that although providing
messages more frequently here will lead to more frequent updates, it will also
lead to higher CPU usage.
The visual appearance of the pad can be specified by arguments to the
constructor. The size, position and colours are all adjustable.
If a producerFinished or shutdownMicroprocess message is received on its
"control" inbox, it is passed on out of its "signal" outbox and the component
terminates.
"""
import time
import pygame
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess, WaitComplete
from Kamaelia.UI.GraphicDisplay import PygameDisplay
class XYPad(Axon.Component.component):
"""\
XYPad([bouncingPuck, position, bgcolour, fgcolour, positionMsg,
collisionMsg, size]) -> new XYPad component.
Create an XY pad widget using the Pygame Display service. Sends messages
for position and direction changes out of its "outbox" outbox.
Keyword arguments (all optional):
bouncingPuck -- whether the puck will continue to move after it has been
dragged (default=True)
position -- (x,y) position of top left corner in pixels
bgcolour -- (r,g,b) fill colour (default=(255,255,255))
    fgcolour -- (r,g,b) colour of the puck and border (default=(0,0,0))
messagePrefix -- string to be prepended to all messages
    positionMsg -- sent as the first element of a (positionMsg, (x, y)) tuple
                   when the puck moves (default="Position")
collisionMsg -- (t, r, b, l) sent as the first element of a
(collisionMsg[i], 1) tuple when the puck hits a side
(default = ("top", "right", "bottom", "left"))
size -- (w,h) in pixels (default=(100, 100))
"""
Inboxes = {"inbox" : "Receive events from Pygame Display",
"remoteChanges" : "Receive messages to alter the state of the XY pad",
"control" : "For shutdown messages",
"callback" : "Receive callbacks from Pygame Display",
"newframe" : "Recieve messages indicating a new frame is to be drawn"
}
Outboxes = {"outbox" : "XY positions emitted here",
"localChanges" : "Messages indicating change in the state of the XY pad emitted here",
"signal" : "For shutdown messages",
"display_signal" : "Outbox used for communicating to the display surface"
}
def __init__(self, bouncingPuck=True, position=None,
bgcolour=(255, 255, 255), fgcolour=(0, 0, 0),
messagePrefix = "",
positionMsg="Position",
collisionMsg = ("Top", "Right", "Bottom", "Left"),
size=(100, 100), editable=True):
"""
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
"""
super(XYPad, self).__init__()
self.size = size
# Does the puck bounce around
self.bouncingPuck = bouncingPuck
# Is the puck currently bouncing around
self.isBouncing = False
self.mouseDown = False
self.clickTime = None
self.mousePositions = []
self.lastMousePos = (0, 0)
self.puckRadius = 10
self.puckPos = [self.size[0]/2, self.size[1]/2]
self.puckVel = [0, 0]
self.borderWidth = 5
self.bgcolour = bgcolour
self.fgcolour = fgcolour
self.messagePrefix = messagePrefix
self.positionMsg = positionMsg
self.collisionMsg = collisionMsg
self.editable = editable
self.dispRequest = {"DISPLAYREQUEST" : True,
"callback" : (self,"callback"),
"events" : (self, "inbox"),
"size": self.size,
}
if position:
self.dispRequest["position"] = position
def waitBox(self, boxName):
"""Wait for a message on boxName inbox"""
while 1:
if self.dataReady(boxName):
return
else:
yield 1
def main(self):
"""Main loop."""
displayService = PygameDisplay.getDisplayService()
self.link((self,"display_signal"), displayService)
self.send(self.dispRequest,
"display_signal")
# Wait until we get a display
while 1:
yield WaitComplete(self.waitBox("callback"))
break
self.display = self.recv("callback")
# Initial render so we don't see a blank screen
self.render()
if self.editable:
self.send({"ADDLISTENEVENT" : pygame.MOUSEBUTTONDOWN,
"surface" : self.display},
"display_signal")
self.send({"ADDLISTENEVENT" : pygame.MOUSEBUTTONUP,
"surface" : self.display},
"display_signal")
self.send({"ADDLISTENEVENT" : pygame.MOUSEMOTION,
"surface" : self.display},
"display_signal")
done = False
while not done:
if not self.anyReady():
self.pause()
yield 1
while self.dataReady("control"):
cmsg = self.recv("control")
if (isinstance(cmsg, producerFinished) or
isinstance(cmsg, shutdownMicroprocess)):
self.send(cmsg, "signal")
done = True
while self.dataReady("inbox"):
for event in self.recv("inbox"):
if event.type == pygame.MOUSEBUTTONDOWN:
self.clickTime = time.time()
if self.display.get_rect().collidepoint(*event.pos):
self.mouseDown = True
self.isBouncing = False
self.mousePositions = []
self.puckVel = [0, 0]
self.puckPos = list(event.pos)
self.lastMousePos = event.pos
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])),
"localChanges")
self.send((self.messagePrefix + "Velocity",
self.puckVel), "localChanges")
if event.type == pygame.MOUSEBUTTONUP:
if self.mouseDown:
if (self.bouncingPuck and
time.time() - self.clickTime > 0.1):
# Click and drag
self.isBouncing = True
if len(self.mousePositions):
for i in xrange(2):
# Use the average of the last 50
# relative mouse positions
positions = [x[i] for x in self.mousePositions]
self.puckVel[i] = sum(positions)
self.puckVel[i] /= float(len(positions))
else:
# Just a click
self.puckVel = [0, 0]
self.render()
self.send((self.messagePrefix + "Velocity",
self.puckVel), "localChanges")
self.mouseDown = False
if event.type == pygame.MOUSEMOTION and self.mouseDown:
if self.display.get_rect().collidepoint(*event.pos):
# We are dragging inside the display
# Keep a buffer of 50 mouse positions
if len(self.mousePositions) > 50:
del self.mousePositions[0]
relPos = []
for i in xrange(2):
relPos.append(event.pos[i] -
self.lastMousePos[i])
self.mousePositions.append(relPos)
# Move the puck to where the mouse is and remember
# where it is
self.puckPos = list(event.pos)
self.lastMousePos = event.pos
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])),
"localChanges")
self.render()
if self.dataReady("remoteChanges"):
bundle = self.recv("remoteChanges")
# The action to take is given by the last section of the
# OSC address - this should maybe be done by a component and
# we just listen for ("Velocity", (xVel, yVel)) tuples
action = bundle[0].split("/")[-1]
if action == "Velocity":
if self.bouncingPuck:
self.puckVel = bundle[1]
self.isBouncing = 1
elif action == "Position":
for i in xrange(2):
self.puckPos[i] = self.size[i] * bundle[1][i]
self.render()
if self.dataReady("newframe"):
# Time to render a new frame
# Clear any backlog of render messages
while self.dataReady("newframe"):
self.recv("newframe")
# Change the direction of the puck if it hits a wall
if self.isBouncing:
self.processCollisions()
if self.isBouncing:
# Update the position
for i in xrange(2):
self.puckPos[i] += self.puckVel[i]
if self.puckPos[i] < 0:
self.puckPos[i] = 0
if self.puckPos[i] > self.size[i]:
self.puckPos[i] = self.size[i]
self.render()
def processCollisions(self):
"""
Detect whether the puck has collided with a wall, and change its
direction appropriately
"""
if self.puckPos[0] <= 0:
# Left wall
self.puckVel[0] *= -1
self.send((self.messagePrefix + self.collisionMsg[3], 1), "outbox")
if self.puckPos[0] >= self.size[0]:
# Right wall
self.puckVel[0] *= -1
self.send((self.messagePrefix + self.collisionMsg[1], 1), "outbox")
if self.puckPos[1] <= 0:
# Top wall
self.puckVel[1] *= -1
self.send((self.messagePrefix + self.collisionMsg[0], 1), "outbox")
if self.puckPos[1] >= self.size[1]:
# Bottom wall
self.puckVel[1] *= -1
self.send((self.messagePrefix + self.collisionMsg[2], 1), "outbox")
def render(self):
"""Draw the border and puck onto the surface"""
# Background
self.display.fill(self.bgcolour)
# Border
pygame.draw.rect(self.display, self.fgcolour,
self.display.get_rect(), self.borderWidth)
# Puck
pygame.draw.circle(self.display, self.fgcolour,
[int(x) for x in self.puckPos], self.puckRadius)
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])), "outbox")
class XYPadMidiConverter(Axon.Component.component):
    """Convert messages from an XYPad into MIDI-style tuples: position
    messages become two controller-change tuples, and collision messages
    become note-on tuples using the drum mapping below.
    """
channel = 0
mapping = {"Top" : 36, # Bass drum
"Right" : 38, # Snare
"Bottom" : 42, # Closed HH
"Left" : 49} # Crash
positionCCNumbers = (0, 1)
def main(self):
while 1:
if self.dataReady("inbox"):
message = self.recv("inbox")
address = message[0].split("/")[-1]
if address == "Position":
xPos, yPos = message[1]
self.send((0xB0 + self.channel, self.positionCCNumbers[0],
int(xPos*127)), "outbox")
self.send((0xB0 + self.channel, self.positionCCNumbers[1],
int(yPos*127)), "outbox")
else:
noteNumber = self.mapping[address]
self.send((0x90 + self.channel, noteNumber, 64), "outbox")
if self.dataReady("control"):
msg = self.recv("control")
if (isinstance(msg, producerFinished) or
isinstance(msg, shutdownMicroprocess)):
self.send(msg, "signal")
break
if not self.anyReady():
self.pause()
yield 1
if __name__ == "__main__":
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
from Kamaelia.Util.Console import ConsoleEchoer
FPS = 60
clock = Clock(float(1)/FPS).activate()
clock2 = Clock(float(1)/FPS).activate()
xyPad = XYPad().activate()
xyPad2 = XYPad(size=(200, 200), bouncingPuck = False, position = (210, 0),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255),
positionMsg="p2").activate()
ce = ConsoleEchoer().activate()
clock.link((clock, "outbox"), (xyPad, "newframe"))
clock2.link((clock2, "outbox"), (xyPad2, "newframe"))
xyPad.link((xyPad, "outbox"), (ce,"inbox"))
xyPad2.link((xyPad2, "outbox"), (ce,"inbox"))
Axon.Scheduler.scheduler.run.runThreads()
# from Kamaelia.Chassis.Graphline import Graphline
# from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
# from Kamaelia.Apps.Jam.Protocol.Midi import Midi
# Graphline(clock = Clock(1.0/60),
# xyPad = XYPad(),
# converter = XYPadMidiConverter(),
# midi = Midi(),
# linkages = {("clock", "outbox"):("xyPad","newframe"),
# ("xyPad", "outbox"):("converter","inbox"),
# ("converter","outbox"):("midi", "inbox")}).run()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import httplib as http
from flask import request
from flask import send_from_directory
from django.core.urlresolvers import reverse
from geoip import geolite2
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from osf.models import Institution
from website import util
from website import prereg
from website import settings
from website import language
from website.util import metrics
from website.util import paths
from website.util import sanitize
from website import maintenance
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile.utils import get_profile_image_url
from website.profile import views as profile_views
from website.project import views as project_views
from addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.preprints import views as preprint_views
from website.registries import views as registries_views
from website.reviews import views as reviews_views
from website.institutions import views as institution_views
from website.notifications import views as notification_views
from website.ember_osf_web import views as ember_osf_web_views
from website.closed_challenges import views as closed_challenges_views
from website.identifiers import views as identifier_views
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
user_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path_rounded_corners} for inst in user.affiliated_institutions.all()] if user else []
location = geolite2.lookup(request.remote_addr) if request.remote_addr else None
if request.host_url != settings.DOMAIN:
try:
inst_id = Institution.objects.get(domains__icontains=[request.host])._id
request_login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
except Institution.DoesNotExist:
request_login_url = request.url.replace(request.host_url, settings.DOMAIN)
else:
request_login_url = request.url
return {
'private_link_anonymous': is_private_link_anonymous_view(),
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._id if user else '',
'user_locale': user.locale if user and user.locale else '',
'user_timezone': user.timezone if user and user.timezone else '',
'user_url': user.url if user else '',
'user_profile_image': get_profile_image_url(user=user, size=25) if user else '',
'user_email_verifications': user.unconfirmed_email_info if user else [],
'user_api_url': user.api_url if user else '',
'user_entry_point': metrics.get_entry_point(user) if user else '',
'user_institutions': user_institutions if user else None,
'display_name': user.fullname if user else '',
'anon': {
'continent': getattr(location, 'continent', None),
'country': getattr(location, 'country', None),
},
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'prev_status': status.pop_previous_status_messages(),
'domain': settings.DOMAIN,
'api_domain': settings.API_DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
'popular_links_node': settings.POPULAR_LINKS_NODE,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'api_v2_url': util.api_v2_url, # URL function for templates
'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
'sanitize': sanitize,
'sjson': lambda s: sanitize.safe_json(s),
'webpack_asset': paths.webpack_asset,
'waterbutler_url': settings.WATERBUTLER_URL,
'login_url': cas.get_login_url(request_login_url),
'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
'profile_url': cas.get_profile_url(),
'enable_institutions': settings.ENABLE_INSTITUTIONS,
'keen': {
'public': {
'project_id': settings.KEEN['public']['project_id'],
'write_key': settings.KEEN['public']['write_key'],
},
'private': {
'project_id': settings.KEEN['private']['project_id'],
'write_key': settings.KEEN['private']['write_key'],
},
},
'maintenance': maintenance.get_maintenance(),
'recaptcha_site_key': settings.RECAPTCHA_SITE_KEY,
'custom_citations': settings.CUSTOM_CITATIONS,
'osf_support_email': settings.OSF_SUPPORT_EMAIL,
'wafflejs_url': '{api_domain}{waffle_url}'.format(api_domain=settings.API_DOMAIN.rstrip('/'), waffle_url=reverse('wafflejs'))
}
def is_private_link_anonymous_view():
# Avoid circular import
from osf.models import PrivateLink
try:
return PrivateLink.objects.filter(key=request.args.get('view_only')).values_list('anonymous', flat=True).get()
except PrivateLink.DoesNotExist:
return False
class OsfWebRenderer(WebRenderer):
"""Render a Mako template with OSF context vars.
:param trust: Optional. If ``False``, markup-safe escaping will be enabled
"""
def __init__(self, *args, **kwargs):
kwargs['data'] = get_globals
super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Use if a view only redirects or raises error
notemplate = OsfWebRenderer('', renderer=render_mako_string, trust=False)
# Static files (robots.txt, etc.)
def favicon():
return send_from_directory(
settings.STATIC_FOLDER,
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def robots():
"""Serves the robots.txt file."""
# Allow local robots.txt
if os.path.exists(os.path.join(settings.STATIC_FOLDER,
'robots.local.txt')):
robots_file = 'robots.local.txt'
else:
robots_file = 'robots.txt'
return send_from_directory(
settings.STATIC_FOLDER,
robots_file,
mimetype='text/plain'
)
def sitemap_file(path):
"""Serves the sitemap/* files."""
if path.endswith('.xml.gz'):
mime = 'application/x-gzip'
elif path.endswith('.xml'):
mime = 'text/xml'
else:
raise HTTPError(http.NOT_FOUND)
return send_from_directory(
settings.STATIC_FOLDER + '/sitemaps/',
path,
mimetype=mime
)
def ember_app(path=None):
"""Serve the contents of the ember application"""
ember_app_folder = None
fp = path or 'index.html'
for k in settings.EXTERNAL_EMBER_APPS.keys():
if request.path.strip('/').startswith(k):
ember_app_folder = os.path.abspath(os.path.join(os.getcwd(), settings.EXTERNAL_EMBER_APPS[k]['path']))
break
if not ember_app_folder:
raise HTTPError(http.NOT_FOUND)
if not os.path.abspath(os.path.join(ember_app_folder, fp)).startswith(ember_app_folder):
# Prevent accessing files outside of the ember build dir
raise HTTPError(http.NOT_FOUND)
if not os.path.isfile(os.path.join(ember_app_folder, fp)):
fp = 'index.html'
return send_from_directory(ember_app_folder, fp)
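# Illustrative note on the containment check in ``ember_app`` above (an
# editorial sketch, not part of the original module): resolving the requested
# path and requiring it to stay under the build folder is what rejects
# traversal attempts. The paths below are hypothetical.
#
#   >>> folder = '/srv/ember/build'
#   >>> os.path.abspath(os.path.join(folder, 'assets/app.js')).startswith(folder)
#   True
#   >>> os.path.abspath(os.path.join(folder, '../secrets.txt')).startswith(folder)
#   False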
def goodbye():
# Redirect to dashboard if logged in
if _get_current_user():
return redirect(util.web_url_for('index'))
status.push_status_message(language.LOGOUT, kind='success', trust=False)
return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule(
'/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string, trust=False)
),
Rule(
'/api/v1/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
json_renderer
),
])
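    # Editorial sketch (inferred from the calls in this module, not an
    # authoritative API reference): each ``Rule`` appears to bundle one or more
    # URL patterns, the allowed HTTP methods, either a view callable or a
    # static context dict, and a renderer, plus optional ``view_kwargs`` and
    # ``endpoint_suffix``. A hypothetical rule would look like:
    #
    #   Rule('/example/', 'get', website_views.index, json_renderer)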
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
Rule('/sitemaps/<path>', 'get', sitemap_file, json_renderer),
])
# Ember Applications
if settings.USE_EXTERNAL_EMBER:
# Routes that serve up the Ember application. Hide behind feature flag.
for prefix in settings.EXTERNAL_EMBER_APPS.keys():
process_rules(app, [
Rule(
[
'/<provider>/<guid>/download',
'/<provider>/<guid>/download/',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid_download,
notemplate,
endpoint_suffix='__' + prefix
),
], prefix='/' + prefix)
process_rules(app, [
Rule(
[
'/',
'/<path:path>',
],
'get',
ember_app,
json_renderer,
endpoint_suffix='__' + prefix
),
], prefix='/' + prefix)
if settings.EXTERNAL_EMBER_APPS.get('ember_osf_web'):
process_rules(app, [
Rule(
ember_osf_web_views.routes,
'get',
ember_osf_web_views.use_ember_app,
notemplate
)
])
### Base ###
process_rules(app, [
Rule(
'/dashboard/',
'get',
website_views.dashboard,
OsfWebRenderer('home.mako', trust=False)
),
Rule(
'/myprojects/',
'get',
website_views.my_projects,
OsfWebRenderer('my_projects.mako', trust=False)
),
Rule(
'/reproducibility/',
'get',
website_views.reproducibility,
notemplate
),
Rule('/about/', 'get', website_views.redirect_about, notemplate),
Rule('/help/', 'get', website_views.redirect_help, notemplate),
Rule('/faq/', 'get', website_views.redirect_faq, notemplate),
Rule(['/getting-started/', '/getting-started/email/', '/howosfworks/'], 'get', website_views.redirect_getting_started, notemplate),
Rule('/support/', 'get', {}, OsfWebRenderer('public/pages/support.mako', trust=False)),
Rule(
'/explore/',
'get',
discovery_views.redirect_explore_to_activity,
notemplate
),
Rule(
[
'/messages/',
],
'get',
{},
OsfWebRenderer('public/comingsoon.mako', trust=False)
),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako', trust=False),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako', trust=False),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako', trust=False),
),
Rule(
'/api/v1/meetings/submissions/',
'get',
conference_views.conference_submissions,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule(
'/news/',
'get',
website_views.redirect_to_cos_news,
notemplate
),
Rule(
'/erpc/',
'get',
closed_challenges_views.erpc_landing_page,
OsfWebRenderer('erpc_landing_page.mako', trust=False)
),
Rule(
'/prereg/',
'get',
prereg.prereg_landing_page,
OsfWebRenderer('prereg_landing_page.mako', trust=False)
),
Rule(
'/preprints/',
'get',
preprint_views.preprint_landing_page,
OsfWebRenderer('public/pages/preprint_landing.mako', trust=False),
),
Rule(
'/registries/',
'get',
registries_views.registries_landing_page,
OsfWebRenderer('public/pages/registries_landing.mako', trust=False),
),
Rule(
'/reviews/',
'get',
reviews_views.reviews_landing_page,
OsfWebRenderer('public/pages/reviews_landing.mako', trust=False),
),
Rule(
'/preprint/',
'get',
preprint_views.preprint_redirect,
notemplate,
),
Rule(
'/api/v1/<campaign>/draft_registrations/',
'get',
prereg.prereg_draft_registrations,
json_renderer,
),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako', trust=False),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
process_rules(app, [
Rule('/confirmed_emails/', 'put', auth_views.unconfirmed_email_add, json_renderer),
Rule('/confirmed_emails/', 'delete', auth_views.unconfirmed_email_remove, json_renderer)
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.redirect_explore_activity_to_activity,
notemplate
),
Rule(
'/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
process_rules(app, [
# confirm email
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
notemplate
),
# confirm email for login through external identity provider
Rule(
'/confirm/external/<uid>/<token>/',
'get',
auth_views.external_login_confirm_email_get,
notemplate
),
# reset password get
Rule(
'/resetpassword/<uid>/<token>/',
'get',
auth_views.reset_password_get,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# reset password post
Rule(
'/resetpassword/<uid>/<token>/',
'post',
auth_views.reset_password_post,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# resend confirmation get
Rule(
'/resend/',
'get',
auth_views.resend_confirmation_get,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# resend confirmation post
Rule(
'/resend/',
'post',
auth_views.resend_confirmation_post,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# oauth user email get
Rule(
'/external-login/email',
'get',
auth_views.external_login_email_get,
OsfWebRenderer('external_login_email.mako', render_mako_string, trust=False)
),
# oauth user email post
Rule(
'/external-login/email',
'post',
auth_views.external_login_email_post,
OsfWebRenderer('external_login_email.mako', render_mako_string, trust=False)
),
# user sign up page
Rule(
'/register/',
'get',
auth_views.auth_register,
OsfWebRenderer('public/register.mako', trust=False)
),
# osf login and campaign login
Rule(
[
'/login/',
'/account/'
],
'get',
auth_views.auth_login,
notemplate
),
# create user account via api
Rule(
'/api/v1/register/',
'post',
auth_views.register_user,
json_renderer
),
# osf logout and cas logout
Rule(
'/logout/',
'get',
auth_views.auth_logout,
notemplate
),
# forgot password get
Rule(
'/forgotpassword/',
'get',
auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
# forgot password post
Rule(
'/forgotpassword/',
'post',
auth_views.forgot_password_post,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
Rule(
'/login/connected_tools/',
'get',
landing_page_views.connected_tools,
notemplate
),
Rule(
'/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
notemplate
),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
# unregistered user claim account (contributor-ship of a project)
# user will be required to set email and password
# claim token must be present in query parameter
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
# registered user claim account (contributor-ship of a project)
# user will be required to verify password
# claim token must be present in query parameter
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
[
'/project/<pid>/addons/',
'/project/<pid>/node/<nid>/addons/',
],
'get',
project_views.node.node_addons,
OsfWebRenderer('project/addons.mako', trust=False)
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/tokens/',
'get',
profile_views.personal_access_token_list,
OsfWebRenderer('profile/personal_tokens_list.mako', trust=False)
),
Rule(
'/settings/tokens/create/',
'get',
profile_views.personal_access_token_register,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
Rule(
'/settings/tokens/<_id>/',
'get',
profile_views.personal_access_token_detail,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
)
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view_json, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id_json, json_renderer),
# Used by profile.html
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
'/profile/cancel_request_deactivation/',
'post',
profile_views.cancel_request_deactivation,
json_renderer,
),
Rule(
'/profile/logins/',
'patch',
profile_views.delete_external_identity,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule(
'/search/',
'get',
{'shareUrl': settings.SHARE_URL},
OsfWebRenderer('search.mako', trust=False)
),
Rule(
'/share/registration/',
'get',
{'register': settings.SHARE_REGISTRATION_URL},
json_renderer
),
Rule(
'/api/v1/user/search/',
'get', search_views.search_contributor,
json_renderer
),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', 'get', website_views.legacy_share_v1_search, json_renderer),
], prefix='/api/v1')
# Institution
process_rules(app, [
Rule('/institutions/<inst_id>/', 'get', institution_views.view_institution, OsfWebRenderer('institution.mako', trust=False))
])
# Project
# Web
process_rules(app, [
# '/' route loads home.mako if logged in, otherwise loads landing.mako
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako', trust=False)),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('landing.mako', trust=False)),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule( # TODO: Where, if anywhere, is this route used?
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako', trust=False)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<metaschema_id>/',
'/project/<pid>/node/<nid>/register/<metaschema_id>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'post',
project_views.drafts.new_draft_registration,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/',
],
'get',
project_views.drafts.edit_draft_registration_page,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
],
'get',
project_views.drafts.draft_before_register_page,
OsfWebRenderer('project/register_draft.mako', trust=False)),
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
'/api/v1/project/<pid>/files/<provider>/<path:path>/',
'/api/v1/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
json_renderer
),
Rule(
[
'/project/<pid>/files/deleted/<trashed_id>/',
'/project/<pid>/node/<nid>/files/deleted/<trashed_id>/',
],
'get',
addon_views.addon_deleted_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
#api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
Rule(
[
'/quickfiles/<fid>/'
],
'get',
addon_views.addon_view_or_download_quickfile,
json_renderer
)
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by [coming replacement]
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
# Draft Registrations
Rule([
'/project/<pid>/drafts/',
], 'get', project_views.drafts.get_draft_registrations, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'get', project_views.drafts.get_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'put', project_views.drafts.update_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'delete', project_views.drafts.delete_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/submit/',
], 'post', project_views.drafts.submit_draft_for_review, json_renderer),
# Meta Schemas
Rule([
'/project/drafts/schemas/',
], 'get', project_views.drafts.get_metaschemas, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule(
[
'/project/<pid>/contributor/remove/',
'/project/<pid>/node/<nid>/contributor/remove/',
],
'POST',
project_views.contributor.project_remove_contributor,
json_renderer,
),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
            '/project/<pid>/node/<nid>/beforeregister/',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
], 'post', project_views.drafts.register_draft_registration, json_renderer),
Rule([
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
identifier_views.node_identifiers_post,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/tree/',
'/project/<pid>/node/<nid>/tree/'
],
'get',
project_views.node.get_node_tree,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
)
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('addons')
if settings.DEV_MODE:
from flask import stream_with_context, Response
import requests
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
@app.route('/ember-cli-live-reload.js')
def ember_cli_live_reload():
req = requests.get('{}/ember-cli-live-reload.js'.format(settings.LIVE_RELOAD_DOMAIN), stream=True)
return Response(stream_with_context(req.iter_content()), content_type=req.headers['content-type'])
|
|
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import netaddr
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import networks_associate
from nova.api.openstack.compute.contrib import os_networks as networks
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
FAKE_NETWORKS = [
{
'bridge': 'br100', 'vpn_public_port': 1000,
'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0',
'updated_at': '2011-08-16 09:26:13.048257',
'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
'cidr_v6': None, 'deleted_at': None,
'gateway': '10.0.0.1', 'label': 'mynet_0',
'project_id': '1234', 'rxtx_base': None,
'vpn_private_address': '10.0.0.2', 'deleted': False,
'vlan': 100, 'broadcast': '10.0.0.7',
'netmask': '255.255.255.248', 'injected': False,
'cidr': '10.0.0.0/29',
'vpn_public_address': '127.0.0.1', 'multi_host': False,
'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
'gateway_v6': None, 'netmask_v6': None, 'priority': None,
'created_at': '2011-08-15 06:19:19.387525',
},
{
'bridge': 'br101', 'vpn_public_port': 1001,
'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0',
'updated_at': None, 'id': 2, 'cidr_v6': None,
'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
'deleted_at': None, 'gateway': '10.0.0.9',
'label': 'mynet_1', 'project_id': None,
'vpn_private_address': '10.0.0.10', 'deleted': False,
'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None,
'netmask': '255.255.255.248', 'injected': False,
'cidr': '10.0.0.10/29', 'vpn_public_address': None,
'multi_host': False, 'dns1': None, 'dns2': None, 'host': None,
'gateway_v6': None, 'netmask_v6': None, 'priority': None,
'created_at': '2011-08-15 06:19:19.885495',
},
]
FAKE_USER_NETWORKS = [
{
'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248',
'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None,
'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0',
'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
},
{
'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248',
'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None,
'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1',
'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
},
]
NEW_NETWORK = {
"network": {
"bridge_interface": "eth0",
"cidr": "10.20.105.0/24",
"label": "new net 111",
"vlan_start": 111,
}
}
class FakeNetworkAPI(object):
_sentinel = object()
def __init__(self):
self.networks = copy.deepcopy(FAKE_NETWORKS)
def delete(self, context, network_id):
for i, network in enumerate(self.networks):
if network['id'] == network_id:
                del self.networks[i]
return True
raise exception.NetworkNotFoundForUUID(uuid=network_id)
def disassociate(self, context, network_uuid):
for network in self.networks:
if network.get('uuid') == network_uuid:
network['project_id'] = None
return True
raise exception.NetworkNotFound(network_id=network_uuid)
def associate(self, context, network_uuid, host=_sentinel,
project=_sentinel):
for network in self.networks:
if network.get('uuid') == network_uuid:
if host is not FakeNetworkAPI._sentinel:
network['host'] = host
if project is not FakeNetworkAPI._sentinel:
network['project_id'] = project
return True
raise exception.NetworkNotFound(network_id=network_uuid)
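    # Note on the ``_sentinel`` default used by ``associate`` above (an
    # editorial comment, not original code): the module-level sentinel object
    # lets this fake distinguish "argument not supplied" from an explicit
    # ``None``. For example, ``associate(ctx, uuid, host=None)`` clears the
    # network's host, while ``associate(ctx, uuid)`` leaves the host untouched.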
    def add_network_to_project(self, context,
                               project_id, network_uuid=None):
        if network_uuid:
            # A specific network was requested: associate exactly that one.
            for network in self.networks:
                if network.get('uuid') == network_uuid:
                    network['project_id'] = project_id
                    return
            return
        # No network specified: associate the first unassigned network.
        for network in self.networks:
            if network.get('project_id', None) is None:
                network['project_id'] = project_id
                return
def get_all(self, context):
return self.networks
def get(self, context, network_id):
for network in self.networks:
if network.get('uuid') == network_id:
return network
raise exception.NetworkNotFound(network_id=network_id)
def create(self, context, **kwargs):
subnet_bits = int(math.ceil(math.log(kwargs.get(
'network_size', CONF.network_size), 2)))
fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(
prefixlen_v4,
count=kwargs.get('num_networks', CONF.num_networks)))
new_networks = []
new_id = max((net['id'] for net in self.networks))
for index, subnet_v4 in enumerate(subnets_v4):
new_id += 1
net = {'id': new_id, 'uuid': str(uuid.uuid4())}
net['cidr'] = str(subnet_v4)
net['netmask'] = str(subnet_v4.netmask)
net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1])
net['broadcast'] = str(subnet_v4.broadcast)
net['dhcp_start'] = str(subnet_v4[2])
for key in FAKE_NETWORKS[0].iterkeys():
net.setdefault(key, kwargs.get(key))
new_networks.append(net)
self.networks += new_networks
return new_networks
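    # Worked example for the arithmetic in ``create`` above (hypothetical
    # numbers, added for illustration): with network_size=8 the subnet needs
    # ceil(log2(8)) = 3 host bits, so prefixlen_v4 = 32 - 3 = 29. Carving one
    # /29 out of NEW_NETWORK's 10.20.105.0/24 yields 10.20.105.0/29 with
    # netmask 255.255.255.248, gateway 10.20.105.1 (subnet_v4[1]) and
    # dhcp_start 10.20.105.2 (subnet_v4[2]).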
class NetworksTest(test.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
self.controller = networks.NetworkController(
self.fake_network_api)
self.associate_controller = networks_associate\
.NetworkAssociateActionController(self.fake_network_api)
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@staticmethod
def network_uuid_to_id(network):
network['id'] = network['uuid']
del network['uuid']
def test_network_list_all_as_user(self):
self.maxDiff = None
req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
res_dict = self.controller.index(req)
expected = copy.deepcopy(FAKE_USER_NETWORKS)
for network in expected:
self.network_uuid_to_id(network)
        self.assertEqual(res_dict, {'networks': expected})
def test_network_list_all_as_admin(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
req.environ["nova.context"].is_admin = True
res_dict = self.controller.index(req)
expected = copy.deepcopy(FAKE_NETWORKS)
for network in expected:
self.network_uuid_to_id(network)
        self.assertEqual(res_dict, {'networks': expected})
def test_network_disassociate(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
res = self.controller._disassociate_host_and_project(
req, uuid, {'disassociate': None})
self.assertEqual(res.status_int, 202)
self.assertEqual(self.fake_network_api.networks[0]['project_id'], None)
self.assertEqual(self.fake_network_api.networks[0]['host'], None)
def test_network_disassociate_host_only(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
res = self.associate_controller._disassociate_host_only(
req, uuid, {'disassociate_host': None})
self.assertEqual(res.status_int, 202)
self.assertNotEqual(self.fake_network_api.networks[0]['project_id'],
None)
self.assertEqual(self.fake_network_api.networks[0]['host'], None)
def test_network_disassociate_project_only(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
res = self.associate_controller._disassociate_project_only(
req, uuid, {'disassociate_project': None})
self.assertEqual(res.status_int, 202)
self.assertEqual(self.fake_network_api.networks[0]['project_id'], None)
self.assertNotEqual(self.fake_network_api.networks[0]['host'], None)
def test_network_disassociate_not_found(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._disassociate_host_and_project,
req, 100, {'disassociate': None})
def test_network_get_as_user(self):
uuid = FAKE_USER_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])}
self.network_uuid_to_id(expected['network'])
self.assertEqual(res_dict, expected)
def test_network_get_as_admin(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
req.environ["nova.context"].is_admin = True
res_dict = self.controller.show(req, uuid)
expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])}
self.network_uuid_to_id(expected['network'])
self.assertEqual(res_dict, expected)
def test_network_get_not_found(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 100)
def test_network_delete(self):
uuid = FAKE_NETWORKS[0]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
res = self.controller.delete(req, 1)
self.assertEqual(res.status_int, 202)
def test_network_delete_not_found(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, 100)
def test_network_add(self):
uuid = FAKE_NETWORKS[1]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
res = self.controller.add(req, {'id': uuid})
self.assertEqual(res.status_int, 202)
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
req.environ["nova.context"].is_admin = True
res_dict = self.controller.show(req, uuid)
self.assertEqual(res_dict['network']['project_id'], 'fake')
def test_network_associate_with_host(self):
uuid = FAKE_NETWORKS[1]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
res = self.associate_controller._associate_host(
req, uuid, {'associate_host': "TestHost"})
self.assertEqual(res.status_int, 202)
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
req.environ["nova.context"].is_admin = True
res_dict = self.controller.show(req, uuid)
self.assertEqual(res_dict['network']['host'], 'TestHost')
def test_network_create(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
res_dict = self.controller.create(req, NEW_NETWORK)
self.assertTrue('network' in res_dict)
uuid = res_dict['network']['id']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
res_dict = self.controller.show(req, uuid)
self.assertTrue(res_dict['network']['label'].
startswith(NEW_NETWORK['network']['label']))
def test_network_create_large(self):
req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
large_network = copy.deepcopy(NEW_NETWORK)
large_network['network']['cidr'] = '128.0.0.0/4'
res_dict = self.controller.create(req, large_network)
self.assertEqual(res_dict['network']['cidr'],
large_network['network']['cidr'])
|
|
# -*- coding: utf-8 -*-
import logging
import itertools
import math
import httplib as http
from modularodm import Q
from flask import request
from framework import utils
from framework import sentry
from framework.auth.core import User
from framework.flask import redirect # VOL-aware redirect
from framework.routing import proxy_url
from framework.exceptions import HTTPError
from framework.auth.forms import SignInForm
from framework.forms import utils as form_utils
from framework.guid.model import GuidStoredObject
from framework.auth.forms import RegistrationForm
from framework.auth.forms import ResetPasswordForm
from framework.auth.forms import ForgotPasswordForm
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from website.models import Guid
from website.models import Node
from website.util import rubeus
from website.util import sanitize
from website.project import model
from website.util import web_url_for
from website.util import permissions
from website.project import new_dashboard
from website.settings import ALL_MY_PROJECTS_ID
from website.settings import ALL_MY_REGISTRATIONS_ID
logger = logging.getLogger(__name__)
def _rescale_ratio(auth, nodes):
"""Get scaling denominator for log lists across a sequence of nodes.
:param nodes: Nodes
:return: Max number of logs
"""
if not nodes:
return 0
counts = [
len(node.logs)
for node in nodes
if node.can_view(auth)
]
if counts:
return float(max(counts))
return 0.0
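# Illustrative sketch (not original code): given viewable nodes with 3, 10 and
# 7 logs respectively, ``_rescale_ratio`` returns 10.0. ``_render_nodes`` below
# passes this value along as ``rescale_ratio`` so that per-node log counts can
# be scaled against the largest one.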
def _render_node(node, auth=None):
"""
:param node:
:return:
"""
perm = None
# NOTE: auth.user may be None if viewing public project while not
# logged in
if auth and auth.user and node.get_permissions(auth.user):
perm_list = node.get_permissions(auth.user)
perm = permissions.reduce_permissions(perm_list)
return {
'title': node.title,
'id': node._primary_key,
'url': node.url,
'api_url': node.api_url,
'primary': node.primary,
'date_modified': utils.iso8601format(node.date_modified),
'category': node.category,
'permissions': perm, # A string, e.g. 'admin', or None,
'archiving': node.archiving,
}
def _render_nodes(nodes, auth=None, show_path=False):
"""
:param nodes:
:return:
"""
ret = {
'nodes': [
_render_node(node, auth)
for node in nodes
],
'rescale_ratio': _rescale_ratio(auth, nodes),
'show_path': show_path
}
return ret
@collect_auth
def index(auth):
"""Redirect to dashboard if user is logged in, else show homepage.
"""
if auth.user:
return redirect(web_url_for('dashboard'))
return {}
def find_dashboard(user):
dashboard_folder = user.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
if dashboard_folder.count() == 0:
new_dashboard(user)
dashboard_folder = user.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
return dashboard_folder[0]
@must_be_logged_in
def get_dashboard(auth, nid=None, **kwargs):
user = auth.user
if nid is None:
node = find_dashboard(user)
dashboard_projects = [rubeus.to_project_root(node, auth, **kwargs)]
return_value = {'data': dashboard_projects}
elif nid == ALL_MY_PROJECTS_ID:
return_value = {'data': get_all_projects_smart_folder(**kwargs)}
elif nid == ALL_MY_REGISTRATIONS_ID:
return_value = {'data': get_all_registrations_smart_folder(**kwargs)}
else:
node = Node.load(nid)
dashboard_projects = rubeus.to_project_hgrid(node, auth, **kwargs)
return_value = {'data': dashboard_projects}
return_value['timezone'] = user.timezone
return_value['locale'] = user.locale
return_value['id'] = user._id
return return_value
@must_be_logged_in
def get_all_projects_smart_folder(auth, **kwargs):
# TODO: Unit tests
user = auth.user
contributed = user.node__contributed
nodes = contributed.find(
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', False) &
Q('is_folder', 'eq', False)
).sort('-title')
keys = nodes.get_keys()
return [rubeus.to_project_root(node, auth, **kwargs) for node in nodes if node.parent_id not in keys]
@must_be_logged_in
def get_all_registrations_smart_folder(auth, **kwargs):
# TODO: Unit tests
user = auth.user
contributed = user.node__contributed
nodes = contributed.find(
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', True) &
Q('is_folder', 'eq', False)
).sort('-title')
# Note(hrybacki): is_retracted and pending_embargo are property methods
# and cannot be directly queried
nodes = filter(lambda node: not node.is_retracted and not node.pending_embargo, nodes)
keys = [node._id for node in nodes]
return [rubeus.to_project_root(node, auth, **kwargs) for node in nodes if node.ids_above.isdisjoint(keys)]
@must_be_logged_in
def get_dashboard_nodes(auth):
"""Get summary information about the current user's dashboard nodes.
:param-query no_components: Exclude components from response.
    NOTE: By default, components will only be shown if the current user
    is a contributor on a component but not its parent project. This query
    parameter forces ALL components to be excluded from the response.
:param-query permissions: Filter upon projects for which the current user
has the specified permissions. Examples: 'write', 'admin'
"""
user = auth.user
contributed = user.node__contributed # nodes user contributed to
nodes = contributed.find(
Q('category', 'eq', 'project') &
Q('is_deleted', 'eq', False) &
Q('is_registration', 'eq', False) &
Q('is_folder', 'eq', False)
)
if request.args.get('no_components') not in [True, 'true', 'True', '1', 1]:
comps = contributed.find(
# components only
Q('category', 'ne', 'project') &
# exclude deleted nodes
Q('is_deleted', 'eq', False) &
# exclude registrations
Q('is_registration', 'eq', False)
)
else:
comps = []
nodes = list(nodes) + list(comps)
if request.args.get('permissions'):
perm = request.args['permissions'].strip().lower()
if perm not in permissions.PERMISSIONS:
raise HTTPError(http.BAD_REQUEST, dict(
message_short='Invalid query parameter',
message_long='{0} is not in {1}'.format(perm, permissions.PERMISSIONS)
))
response_nodes = [node for node in nodes if node.has_permission(user, permission=perm)]
else:
response_nodes = nodes
return _render_nodes(response_nodes, auth)
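# Illustrative sketch of the query parameters handled above (the endpoint path
# is hypothetical; only the query strings are taken from the code):
#
#   GET <dashboard-nodes-endpoint>?no_components=true   -> projects only
#   GET <dashboard-nodes-endpoint>?permissions=write    -> only nodes on which
#                                                          the user has write access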
@must_be_logged_in
def dashboard(auth):
user = auth.user
dashboard_folder = find_dashboard(user)
dashboard_id = dashboard_folder._id
return {'addons_enabled': user.get_addon_names(),
'dashboard_id': dashboard_id,
}
def paginate(items, total, page, size):
pages = math.ceil(total / float(size))
if page < 0 or (pages and page >= pages):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
start = page * size
paginated_items = itertools.islice(items, start, start + size)
return paginated_items, pages
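# Usage sketch for ``paginate`` (hypothetical numbers, shown doctest-style):
#
#   >>> items, pages = paginate(range(25), total=25, page=1, size=10)
#   >>> list(items)
#   [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
#   >>> pages
#   3.0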
@must_be_logged_in
def watched_logs_get(**kwargs):
user = kwargs['auth'].user
try:
page = int(request.args.get('page', 0))
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
try:
size = int(request.args.get('size', 10))
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "size".'
))
total = sum(1 for x in user.get_recent_log_ids())
paginated_logs, pages = paginate(user.get_recent_log_ids(), total, page, size)
logs = (model.NodeLog.load(id) for id in paginated_logs)
return {
"logs": [serialize_log(log) for log in logs],
"total": total,
"pages": pages,
"page": page
}
def serialize_log(node_log, auth=None, anonymous=False):
'''Return a dictionary representation of the log.'''
return {
'id': str(node_log._primary_key),
'user': node_log.user.serialize()
if isinstance(node_log.user, User)
else {'fullname': node_log.foreign_user},
'contributors': [node_log._render_log_contributor(c) for c in node_log.params.get("contributors", [])],
'api_key': node_log.api_key.label if node_log.api_key else '',
'action': node_log.action,
'params': sanitize.safe_unescape_html(node_log.params),
'date': utils.iso8601format(node_log.date),
'node': node_log.node.serialize(auth) if node_log.node else None,
'anonymous': anonymous
}
def reproducibility():
return redirect('/ezcuj/wiki')
def registration_form():
return form_utils.jsonify(RegistrationForm(prefix='register'))
def signin_form():
return form_utils.jsonify(SignInForm())
def forgot_password_form():
return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))
def reset_password_form():
return form_utils.jsonify(ResetPasswordForm())
### GUID ###
def _build_guid_url(base, suffix=None):
url = '/'.join([
each.strip('/') for each in [base, suffix]
if each
])
return u'/{0}/'.format(url)
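# Usage sketch for ``_build_guid_url`` (hypothetical GUID and suffix):
#
#   >>> _build_guid_url('abc12', 'files/osfstorage')
#   u'/abc12/files/osfstorage/'
#   >>> _build_guid_url('abc12')
#   u'/abc12/'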
def resolve_guid(guid, suffix=None):
"""Load GUID by primary key, look up the corresponding view function in the
routing table, and return the return value of the view function without
changing the URL.
:param str guid: GUID primary key
:param str suffix: Remainder of URL after the GUID
:return: Return value of proxied view function
"""
# Look up GUID
guid_object = Guid.load(guid)
if guid_object:
# verify that the object is a GuidStoredObject descendant. If a model
# was once a descendant but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a redirect_mode attribute or otherwise don't behave as
# expected.
if not isinstance(guid_object.referent, GuidStoredObject):
sentry.log_message(
'Guid `{}` resolved to non-guid object'.format(guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
if not referent.deep_url:
raise HTTPError(http.NOT_FOUND)
url = _build_guid_url(referent.deep_url, suffix)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(guid.lower(), suffix)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
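# Resolution flow sketch for ``resolve_guid`` (hypothetical GUID values):
# a request for /ABC12/files/ that misses on Guid.load('ABC12') falls through
# to the lower-cased lookup and redirects to /abc12/files/; a request for
# /abc12/ loads the referent, joins its ``deep_url`` with the suffix via
# ``_build_guid_url``, and proxies to that internal URL without changing the
# address bar. Anything that cannot be resolved raises a 404.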
|